}
func peep(firstp *obj.Prog) {
- g := (*gc.Graph)(gc.Flowstart(firstp, nil))
+ g := gc.Flowstart(firstp, nil)
if g == nil {
return
}
// another MOV $con,R without
// setting R in the interim
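// For example (illustrative sequence):
//	MOVQ $0, AX
//	MOVQ AX, ret+8(FP)
//	MOVQ $0, AX	// redundant: AX still holds $0
// the second constant load can be deleted.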
var p *obj.Prog
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
case x86.ALEAL,
// can be replaced by MOVAPD, which moves the pair of float64s
// instead of just the lower one. We only use the lower one, but
// the processor can do better if we do moves using both.
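// For example (illustrative): a register-to-register
//	MOVSD X2, X3
// becomes
//	MOVAPD X2, X3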
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
if p.As == x86.AMOVLQZX {
if regtyp(&p.From) {
// load pipelining
// push any load from memory as early as possible
// to give it time to complete before use.
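// For example (illustrative), a load such as
//	MOVQ 16(SP), AX
// is moved as early as the flow allows, so it has begun
// well before AX is first used.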
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
case x86.AMOVB,
var p *obj.Prog
var b *gc.Flow
- p0 := (*obj.Prog)(r0.Prog)
+ p0 := r0.Prog
for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
p = r.Prog
if p.As != obj.ANOP {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("pushback\n")
- for r := (*gc.Flow)(b); ; r = r.Link {
+ for r := b; ; r = r.Link {
fmt.Printf("\t%v\n", r.Prog)
if r == r0 {
break
}
}
- t := obj.Prog(*r0.Prog)
+ t := *r0.Prog
for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
p0 = r.Link.Prog
p = r.Prog
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tafter\n")
- for r := (*gc.Flow)(b); ; r = r.Link {
+ for r := b; ; r = r.Link {
fmt.Printf("\t%v\n", r.Prog)
if r == r0 {
break
}
func excise(r *gc.Flow) {
- p := (*obj.Prog)(r.Prog)
+ p := r.Prog
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("%v ===delete===\n", p)
}
func elimshortmov(g *gc.Graph) {
var p *obj.Prog
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
if regtyp(&p.To) {
switch p.As {
// is reg guaranteed to be truncated by a previous L instruction?
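// For example (illustrative): after
//	MOVL x+0(FP), AX
// the upper 32 bits of AX are already zero, so a later
//	MOVLQZX AX, AX
// adds nothing and can be excised.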
func prevl(r0 *gc.Flow, reg int) bool {
- for r := (*gc.Flow)(gc.Uniqp(r0)); r != nil; r = gc.Uniqp(r) {
+ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
p := r.Prog
if p.To.Type == obj.TYPE_REG && int(p.To.Reg) == reg {
flags := progflags(p)
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("subprop %v\n", r0.Prog)
}
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
+ p := r0.Prog
+ v1 := &p.From
if !regtyp(v1) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
return false
}
- v2 := (*obj.Addr)(&p.To)
+ v2 := &p.To
if !regtyp(v2) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
}
}
- t := int(int(v1.Reg))
+ t := int(v1.Reg)
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("copyprop %v\n", r0.Prog)
}
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
- v2 := (*obj.Addr)(&p.To)
+ p := r0.Prog
+ v1 := &p.From
+ v2 := &p.To
if copyas(v1, v2) {
return true
}
*/
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
if copyas(a, v) {
- reg := int(int(s.Reg))
+ reg := int(s.Reg)
if reg >= x86.REG_AX && reg <= x86.REG_R15 || reg >= x86.REG_X0 && reg <= x86.REG_X0+15 {
if f != 0 {
a.Reg = int16(reg)
}
if regtyp(v) {
- reg := int(int(v.Reg))
+ reg := int(v.Reg)
if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
if (s.Reg == x86.REG_BP || s.Reg == x86.REG_R13) && a.Index != x86.REG_NONE {
return 1 /* can't use BP-base with index */
var p *obj.Prog
var t int
- p0 := (*obj.Prog)(r0.Prog)
- v0 := (*obj.Addr)(&p0.To)
- r := (*gc.Flow)(r0)
+ p0 := r0.Prog
+ v0 := &p0.To
+ r := r0
loop:
r = gc.Uniqs(r)
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
dir := align
- if osrc < odst && int64(odst) < int64(osrc)+w {
+ if osrc < odst && odst < osrc+w {
dir = -dir
}
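// Worked example (illustrative values): with osrc = 0, odst = 8, w = 16
// the destination begins inside the source, so a forward copy would
// clobber source bytes before reading them; copying backwards is safe.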
// UNUSED
func peep(firstp *obj.Prog) {
- g := (*gc.Graph)(gc.Flowstart(firstp, nil))
+ g := gc.Flowstart(firstp, nil)
if g == nil {
return
}
goto loop1
}
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
/*
}
}
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
case arm.AMOVW,
* will be eliminated by copy propagation.
*/
func subprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
+ p := r0.Prog
+ v1 := &p.From
if !regtyp(v1) {
return false
}
- v2 := (*obj.Addr)(&p.To)
+ v2 := &p.To
if !regtyp(v2) {
return false
}
}
}
- t := int(int(v1.Reg))
+ t := int(v1.Reg)
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
* set v2 return success
*/
func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
- v2 := (*obj.Addr)(&p.To)
+ p := r0.Prog
+ v1 := &p.From
+ v2 := &p.To
if copyas(v1, v2) {
return true
}
* MOVBS above can be a MOVBS, MOVBU, MOVHS or MOVHU.
*/
func shortprop(r *gc.Flow) bool {
- p := (*obj.Prog)(r.Prog)
- r1 := (*gc.Flow)(findpre(r, &p.From))
+ p := r.Prog
+ r1 := findpre(r, &p.From)
if r1 == nil {
return false
}
- p1 := (*obj.Prog)(r1.Prog)
+ p1 := r1.Prog
if p1.As == p.As {
// Two consecutive extensions.
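// For example (illustrative): MOVBS R0, R1 followed by MOVBS R1, R1;
// the second extension repeats the first and can become a plain move.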
goto gotit
* ..
*/
func shiftprop(r *gc.Flow) bool {
- p := (*obj.Prog)(r.Prog)
+ p := r.Prog
if p.To.Type != obj.TYPE_REG {
if gc.Debug['P'] != 0 {
fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
return false
}
- n := int(int(p.To.Reg))
- a := obj.Addr(obj.Addr{})
+ n := int(p.To.Reg)
+ a := obj.Addr{}
if p.Reg != 0 && p.Reg != p.To.Reg {
a.Type = obj.TYPE_REG
a.Reg = p.Reg
if gc.Debug['P'] != 0 {
fmt.Printf("shiftprop\n%v", p)
}
- r1 := (*gc.Flow)(r)
+ r1 := r
var p1 *obj.Prog
for {
/* find first use of shift result; abort if shift operands or result are changed */
}
/* check whether shift result is used subsequently */
- p2 := (*obj.Prog)(p1)
+ p2 := p1
if int(p1.To.Reg) != n {
var p1 *obj.Prog
/* make the substitution */
p2.From.Reg = 0
- o := int(int(p.Reg))
+ o := int(p.Reg)
if o == 0 {
o = int(p.To.Reg)
}
}
func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) bool {
- for r1 := (*gc.Flow)(g.Start); r1 != nil; r1 = r1.Link {
+ for r1 := g.Start; r1 != nil; r1 = r1.Link {
r1.Active = 0
}
return findu1(r, v)
* MOVBU R0<<0(R1),R0
*/
func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
- p := (*obj.Prog)(r.Prog)
- v := obj.Addr(*a)
+ p := r.Prog
+ v := *a
v.Type = obj.TYPE_REG
- r1 := (*gc.Flow)(findpre(r, &v))
+ r1 := findpre(r, &v)
if r1 != nil {
p1 := r1.Prog
if p1.To.Type == obj.TYPE_REG && p1.To.Reg == v.Reg {
case arm.AMOVW:
if p1.From.Type == obj.TYPE_REG {
- r2 := (*gc.Flow)(findinc(r1, r, &p1.From))
+ r2 := findinc(r1, r, &p1.From)
if r2 != nil {
var r3 *gc.Flow
for r3 = gc.Uniqs(r2); r3.Prog.As == obj.ANOP; r3 = gc.Uniqs(r3) {
}
if a != &p.From || a.Reg != p.To.Reg {
- r1 := (*gc.Flow)(findinc(r, nil, &v))
+ r1 := findinc(r, nil, &v)
if r1 != nil {
/* post-indexing */
p1 := r1.Prog
pred = predinfo[rstart.Prog.As-arm.ABEQ].notscond
}
- for r := (*gc.Flow)(j.start); ; r = successor(r) {
+ for r := j.start; ; r = successor(r) {
if r.Prog.As == arm.AB {
if r != j.last || branch == Delbranch {
excise(r)
var j1 Joininfo
var j2 Joininfo
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
if isbranch(r.Prog) {
t1 = joinsplit(r.S1, &j1)
t2 = joinsplit(r.S2, &j2)
}
func excise(r *gc.Flow) {
- p := (*obj.Prog)(r.Prog)
+ p := r.Prog
obj.Nopout(p)
}
// the src and dst overlap, then reverse direction
dir := align
- if osrc < odst && int64(odst) < int64(osrc)+w {
+ if osrc < odst && odst < osrc+w {
dir = -dir
}
nl, nr = nr, nl
}
- t := (*gc.Type)(nl.Type)
- w := int(int(t.Width * 8))
+ t := nl.Type
+ w := int(t.Width * 8)
var n1 gc.Node
gc.Cgenr(nl, &n1, res)
var n2 gc.Node
gc.TINT16,
gc.TINT32:
gins(optoas(gc.OMUL, t), &n2, &n1)
- p := (*obj.Prog)(gins(arm64.AASR, nil, &n1))
+ p := gins(arm64.AASR, nil, &n1)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(w)
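// (Note: the operands are at most w bits wide, so the product fits in
// the 64-bit register; shifting right by w leaves the high w bits,
// which is the OHMUL result.)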
gc.TUINT16,
gc.TUINT32:
gins(optoas(gc.OMUL, t), &n2, &n1)
- p := (*obj.Prog)(gins(arm64.ALSR, nil, &n1))
+ p := gins(arm64.ALSR, nil, &n1)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(w)
* res = nl >> nr
*/
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- a := int(optoas(op, nl.Type))
+ a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
var n1 gc.Node
if !bounded {
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
gcmp(optoas(gc.OCMP, tcount), &n1, &n3)
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
+ p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n2)
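// (Note: when the count is not provably bounded it is compared against
// the operand width; an out-of-range signed right shift is replaced by
// a shift of width-1, i.e. all sign bits, and the branch above skips
// this fix-up for in-range counts.)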
fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
}
- w := uint64(uint64(nl.Type.Width))
+ w := uint64(nl.Type.Width)
// Avoid taking the address for simple enough types.
if gc.Componentgen(nil, nl) {
return
}
- c := uint64(w % 8) // bytes
- q := uint64(w / 8) // dwords
+ c := w % 8 // bytes
+ q := w / 8 // dwords
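// For example (illustrative): w = 20 gives q = 2 eight-byte stores
// plus c = 4 trailing bytes.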
var r0 gc.Node
gc.Nodreg(&r0, gc.Types[gc.TUINT64], arm64.REGZERO)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 8
p.Scond = arm64.C_XPRE
- pl := (*obj.Prog)(p)
+ pl := p
p = gcmp(arm64.ACMP, &dst, &end)
gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), pl)
p := gins(arm64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
- f := (*gc.Node)(gc.Sysfunc("duffzero"))
+ f := gc.Sysfunc("duffzero")
p = gins(obj.ADUFFZERO, nil, f)
gc.Afunclit(&p.To, f)
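// (Note: duffzero is an unrolled zeroing routine; the entry offset
// chosen for the ADUFFZERO target determines how many eight-byte
// stores run, Duff's-device style.)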
func expandchecks(firstp *obj.Prog) {
var p1 *obj.Prog
- for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
+ for p := firstp; p != nil; p = p.Link {
if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
fmt.Printf("expandchecks: %v\n", p)
}
ft := int(gc.Simsimtype(f.Type))
tt := int(gc.Simsimtype(t.Type))
- cvt := (*gc.Type)(t.Type)
+ cvt := t.Type
if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
gc.Complexmove(f, t)
var gactive uint32
func peep(firstp *obj.Prog) {
- g := (*gc.Graph)(gc.Flowstart(firstp, nil))
+ g := gc.Flowstart(firstp, nil)
if g == nil {
return
}
*/
var p1 *obj.Prog
var r1 *gc.Flow
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
default:
}
// MOVD $c, R'; ADD R', R (R' unused) -> ADD $c, R
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
default:
}
func excise(r *gc.Flow) {
- p := (*obj.Prog)(r.Prog)
+ p := r.Prog
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("%v ===delete===\n", p)
}
* above sequences. This returns true if it modified any instructions.
*/
func subprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
+ p := r0.Prog
+ v1 := &p.From
if !regtyp(v1) {
return false
}
- v2 := (*obj.Addr)(&p.To)
+ v2 := &p.To
if !regtyp(v2) {
return false
}
}
}
- t := int(int(v1.Reg))
+ t := int(v1.Reg)
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
* set v2 return success (caller can remove v1->v2 move)
*/
func copyprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
- v2 := (*obj.Addr)(&p.To)
+ p := r0.Prog
+ v1 := &p.From
+ v2 := &p.To
if copyas(v1, v2) {
if gc.Debug['P'] != 0 {
fmt.Printf("eliminating self-move: %v\n", r0.Prog)
func excludedregs() uint64 {
// Exclude registers with fixed functions
- regbits := uint64(RtoB(arm64.REGRT1) | RtoB(arm64.REGRT2) | RtoB(arm64.REGPR))
+ regbits := RtoB(arm64.REGRT1) | RtoB(arm64.REGRT2) | RtoB(arm64.REGPR)
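// (RtoB maps a register number to its bit in the allocator's register
// mask; OR-ing the bits marks those registers as unavailable.)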
// Exclude R26 - R31.
for r := arm64.REGMAX + 1; r <= arm64.REGZERO; r++ {
} else if w == 1 {
Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &r2, &r1)
} else {
- Thearch.Ginscon(Thearch.Optoas(OMUL, Types[TUINT]), int64(w), &r2)
+ Thearch.Ginscon(Thearch.Optoas(OMUL, Types[TUINT]), w, &r2)
Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &r2, &r1)
}
Regfree(&r2)
} // it's a colas, so must not re-use an oldname
return ts
}
- return colas(lhs, rhs, int32(lno))
+ return colas(lhs, rhs, lno)
default:
p.syntax_error("expecting := or = or comma")
// done in casebody()
markdcl() // matching popdcl in caseblock
stmt := Nod(OXCASE, nil, nil)
- stmt.List = list1(colas(cases, list1(rhs), int32(lno)))
+ stmt.List = list1(colas(cases, list1(rhs), lno))
p.want(':') // consume ':' after declaring select cases for correct lineno
return stmt
allocauto(ptxt)
if false {
- fmt.Printf("allocauto: %d to %d\n", oldstksize, int64(Stksize))
+ fmt.Printf("allocauto: %d to %d\n", oldstksize, Stksize)
}
setlineno(Curfn)
- if int64(Stksize)+Maxarg > 1<<31 {
+ if Stksize+Maxarg > 1<<31 {
Yyerror("stack frame too large (>2GB)")
goto ret
}
// Add missing successor edges to the selectgo blocks.
if len(selectgo) != 0 {
- fixselectgo([]*BasicBlock(selectgo))
+ fixselectgo(selectgo)
}
// Find a depth-first order and assign a depth-first number to
}
fmt.Printf("\n")
- printvars("\tuevar", bb.uevar, []*Node(lv.vars))
- printvars("\tvarkill", bb.varkill, []*Node(lv.vars))
- printvars("\tlivein", bb.livein, []*Node(lv.vars))
- printvars("\tliveout", bb.liveout, []*Node(lv.vars))
- printvars("\tavarinit", bb.avarinit, []*Node(lv.vars))
- printvars("\tavarinitany", bb.avarinitany, []*Node(lv.vars))
- printvars("\tavarinitall", bb.avarinitall, []*Node(lv.vars))
+ printvars("\tuevar", bb.uevar, lv.vars)
+ printvars("\tvarkill", bb.varkill, lv.vars)
+ printvars("\tlivein", bb.livein, lv.vars)
+ printvars("\tliveout", bb.liveout, lv.vars)
+ printvars("\tavarinit", bb.avarinit, lv.vars)
+ printvars("\tavarinitany", bb.avarinitany, lv.vars)
+ printvars("\tavarinitall", bb.avarinitall, lv.vars)
fmt.Printf("\tprog:\n")
for prog := bb.first; ; prog = prog.Link {
// Walk the block instructions backward and update the block
// effects with each prog's effects.
for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
- progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+ progeffects(p, lv.vars, uevar, varkill, avarinit)
if debuglive >= 3 {
printeffects(p, uevar, varkill, avarinit)
}
bvresetall(varkill)
for p := bb.first; ; p = p.Link {
- progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+ progeffects(p, lv.vars, uevar, varkill, avarinit)
if debuglive >= 3 {
printeffects(p, uevar, varkill, avarinit)
}
// allocate liveness maps for those instructions that need them.
// Seed the maps with information about the addrtaken variables.
for p = bb.first; ; p = p.Link {
- progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+ progeffects(p, lv.vars, uevar, varkill, avarinit)
bvandnot(any, any, varkill)
bvandnot(all, all, varkill)
bvor(any, any, avarinit)
cfg := newcfg(firstp)
if debuglive >= 3 {
- printcfg([]*BasicBlock(cfg))
+ printcfg(cfg)
}
vars := getvariables(fn)
lv := newliveness(fn, firstp, cfg, vars)
}
freeliveness(lv)
- freecfg([]*BasicBlock(cfg))
+ freecfg(cfg)
debuglive -= debugdelta
}
} else {
fmt.Fprintf(&buf, "%s(%d)", v.node.Sym.Name, i)
if v.offset != 0 {
- fmt.Fprintf(&buf, "%+d", int64(v.offset))
+ fmt.Fprintf(&buf, "%+d", v.offset)
}
}
biclr(&bits, uint(i))
return
}
old := fmt.Sprintf("%v: undefined: %v\n", n.Line(), n.Left)
- if len(errors) > 0 && int32(errors[len(errors)-1].lineno) == n.Lineno && errors[len(errors)-1].msg == old {
+ if len(errors) > 0 && errors[len(errors)-1].lineno == n.Lineno && errors[len(errors)-1].msg == old {
errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), n.Left, n)
}
}
return true
}
- if unicode.IsSpace(rune(r)) {
+ if unicode.IsSpace(r) {
Yyerror("import path contains space character: %q", path)
return true
}
if t.Etype != TARRAY {
panic("NumElem on non-TARRAY")
}
- return int64(t.Bound)
+ return t.Bound
}
func (t *Type) IsMemory() bool { return false }
// the src and dst overlap, then reverse direction
dir := align
- if osrc < odst && int64(odst) < int64(osrc)+w {
+ if osrc < odst && odst < osrc+w {
dir = -dir
}
nl, nr = nr, nl
}
- t := (*gc.Type)(nl.Type)
- w := int(int(t.Width * 8))
+ t := nl.Type
+ w := int(t.Width * 8)
var n1 gc.Node
gc.Cgenr(nl, &n1, res)
var n2 gc.Node
var lo gc.Node
gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
gins(mips.AMOVV, &lo, &n1)
- p := (*obj.Prog)(gins(mips.ASRAV, nil, &n1))
+ p := gins(mips.ASRAV, nil, &n1)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(w)
var lo gc.Node
gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
gins(mips.AMOVV, &lo, &n1)
- p := (*obj.Prog)(gins(mips.ASRLV, nil, &n1))
+ p := gins(mips.ASRLV, nil, &n1)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(w)
* res = nl >> nr
*/
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- a := int(optoas(op, nl.Type))
+ a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
var n1 gc.Node
fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
}
- w := uint64(uint64(nl.Type.Width))
+ w := uint64(nl.Type.Width)
// Avoid taking the address for simple enough types.
if gc.Componentgen(nil, nl) {
return
}
- c := uint64(w % 8) // bytes
- q := uint64(w / 8) // dwords
+ c := w % 8 // bytes
+ q := w / 8 // dwords
if gc.Reginuse(mips.REGRT1) {
gc.Fatalf("%v in use during clearfat", obj.Rconv(mips.REGRT1))
p = gins(mips.AMOVV, &r0, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 8
- pl := (*obj.Prog)(p)
+ pl := p
p = gins(mips.AADDV, nil, &dst)
p.From.Type = obj.TYPE_CONST
p := gins(mips.ASUBV, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
- f := (*gc.Node)(gc.Sysfunc("duffzero"))
+ f := gc.Sysfunc("duffzero")
p = gins(obj.ADUFFZERO, nil, f)
gc.Afunclit(&p.To, f)
func expandchecks(firstp *obj.Prog) {
var p1 *obj.Prog
- for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
+ for p := firstp; p != nil; p = p.Link {
if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
fmt.Printf("expandchecks: %v\n", p)
}
ft := int(gc.Simsimtype(f.Type))
tt := int(gc.Simsimtype(t.Type))
- cvt := (*gc.Type)(t.Type)
+ cvt := t.Type
if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
gc.Complexmove(f, t)
var gactive uint32
func peep(firstp *obj.Prog) {
- g := (*gc.Graph)(gc.Flowstart(firstp, nil))
+ g := gc.Flowstart(firstp, nil)
if g == nil {
return
}
*/
var p1 *obj.Prog
var r1 *gc.Flow
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
default:
}
func excise(r *gc.Flow) {
- p := (*obj.Prog)(r.Prog)
+ p := r.Prog
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("%v ===delete===\n", p)
}
* above sequences. This returns true if it modified any instructions.
*/
func subprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
+ p := r0.Prog
+ v1 := &p.From
if !regtyp(v1) {
return false
}
- v2 := (*obj.Addr)(&p.To)
+ v2 := &p.To
if !regtyp(v2) {
return false
}
}
}
- t := int(int(v1.Reg))
+ t := int(v1.Reg)
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
* set v2 return success (caller can remove v1->v2 move)
*/
func copyprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
- v2 := (*obj.Addr)(&p.To)
+ p := r0.Prog
+ v1 := &p.From
+ v2 := &p.To
if copyas(v1, v2) {
if gc.Debug['P'] != 0 {
fmt.Printf("eliminating self-move: %v\n", r0.Prog)
func excludedregs() uint64 {
// Exclude registers with fixed functions
- regbits := uint64(1<<0 | RtoB(mips.REGSP) | RtoB(mips.REGG) | RtoB(mips.REGTMP) | RtoB(mips.REGLINK) | RtoB(mips.REG_R26) | RtoB(mips.REG_R27))
+ regbits := 1<<0 | RtoB(mips.REGSP) | RtoB(mips.REGG) | RtoB(mips.REGTMP) | RtoB(mips.REGLINK) | RtoB(mips.REG_R26) | RtoB(mips.REG_R27)
// Also exclude floating point registers with fixed constants
regbits |= RtoB(mips.FREGZERO) | RtoB(mips.FREGHALF) | RtoB(mips.FREGONE) | RtoB(mips.FREGTWO)
// the src and dst overlap, then reverse direction
dir := align
- if osrc < odst && int64(odst) < int64(osrc)+w {
+ if osrc < odst && odst < osrc+w {
dir = -dir
}
nl, nr = nr, nl
}
- t := (*gc.Type)(nl.Type)
- w := int(int(t.Width * 8))
+ t := nl.Type
+ w := int(t.Width * 8)
var n1 gc.Node
gc.Cgenr(nl, &n1, res)
var n2 gc.Node
gc.TINT16,
gc.TINT32:
gins(optoas(gc.OMUL, t), &n2, &n1)
- p := (*obj.Prog)(gins(ppc64.ASRAD, nil, &n1))
+ p := gins(ppc64.ASRAD, nil, &n1)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(w)
gc.TUINT16,
gc.TUINT32:
gins(optoas(gc.OMUL, t), &n2, &n1)
- p := (*obj.Prog)(gins(ppc64.ASRD, nil, &n1))
+ p := gins(ppc64.ASRD, nil, &n1)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(w)
* res = nl >> nr
*/
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- a := int(optoas(op, nl.Type))
+ a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
var n1 gc.Node
if !bounded {
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
gins(optoas(gc.OCMP, tcount), &n1, &n3)
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
+ p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n2)
fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
}
- w := uint64(uint64(nl.Type.Width))
+ w := uint64(nl.Type.Width)
// Avoid taking the address for simple enough types.
if gc.Componentgen(nil, nl) {
return
}
- c := uint64(w % 8) // bytes
- q := uint64(w / 8) // dwords
+ c := w % 8 // bytes
+ q := w / 8 // dwords
if gc.Reginuse(ppc64.REGRT1) {
gc.Fatalf("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
p = gins(ppc64.AMOVDU, &r0, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 8
- pl := (*obj.Prog)(p)
+ pl := p
p = gins(ppc64.ACMP, &dst, &end)
gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
p := gins(ppc64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
- f := (*gc.Node)(gc.Sysfunc("duffzero"))
+ f := gc.Sysfunc("duffzero")
p = gins(obj.ADUFFZERO, nil, f)
gc.Afunclit(&p.To, f)
var p1 *obj.Prog
var p2 *obj.Prog
- for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
+ for p := firstp; p != nil; p = p.Link {
if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
fmt.Printf("expandchecks: %v\n", p)
}
ft := int(gc.Simsimtype(f.Type))
tt := int(gc.Simsimtype(t.Type))
- cvt := (*gc.Type)(t.Type)
+ cvt := t.Type
if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
gc.Complexmove(f, t)
gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
gmove(&bigf, &r2)
gins(ppc64.AFCMPU, &r1, &r2)
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1))
+ p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)
gins(ppc64.AFSUB, &r2, &r1)
gc.Patch(p1, gc.Pc)
gc.Regfree(&r2)
var r3 gc.Node
gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
gins(ppc64.AFCTIDZ, &r1, &r2)
- p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil))
+ p1 := gins(ppc64.AFMOVD, &r2, nil)
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = ppc64.REGSP
p1.To.Offset = -8
gc.Regfree(&r2)
gc.Regfree(&r1)
if tt == gc.TUINT64 {
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
+ p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1) // use CR0 here again
gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
gins(ppc64.AMOVD, &bigi, &r1)
gins(ppc64.AADD, &r1, &r3)
gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
gmove(&bigi, &r2)
gins(ppc64.ACMPU, &r1, &r2)
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1))
- p2 := (*obj.Prog)(gins(ppc64.ASRD, nil, &r1))
+ p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
+ p2 := gins(ppc64.ASRD, nil, &r1)
p2.From.Type = obj.TYPE_CONST
p2.From.Offset = 1
gc.Patch(p1, gc.Pc)
}
gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
- p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil))
+ p1 := gins(ppc64.AMOVD, &r1, nil)
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = ppc64.REGSP
p1.To.Offset = -8
gins(ppc64.AFCFID, &r2, &r2)
gc.Regfree(&r1)
if ft == gc.TUINT64 {
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again
+ p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) // use CR0 here again
gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
gins(ppc64.AFMUL, &r1, &r2)
gc.Patch(p1, gc.Pc)
var gactive uint32
func peep(firstp *obj.Prog) {
- g := (*gc.Graph)(gc.Flowstart(firstp, nil))
+ g := gc.Flowstart(firstp, nil)
if g == nil {
return
}
*/
var p1 *obj.Prog
var r1 *gc.Flow
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
default:
* look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
* when OP can set condition codes correctly
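* For example (illustrative):
*	ADD R3,R4,R5; CMP R5,$0; BEQ l
* becomes
*	ADDCC R3,R4,R5; BEQ l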
*/
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
case ppc64.ACMP,
}
func excise(r *gc.Flow) {
- p := (*obj.Prog)(r.Prog)
+ p := r.Prog
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("%v ===delete===\n", p)
}
* above sequences. This returns true if it modified any instructions.
*/
func subprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
+ p := r0.Prog
+ v1 := &p.From
if !regtyp(v1) {
return false
}
- v2 := (*obj.Addr)(&p.To)
+ v2 := &p.To
if !regtyp(v2) {
return false
}
}
}
- t := int(int(v1.Reg))
+ t := int(v1.Reg)
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
* set v2 return success (caller can remove v1->v2 move)
*/
func copyprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
- v2 := (*obj.Addr)(&p.To)
+ p := r0.Prog
+ v1 := &p.From
+ v2 := &p.To
if copyas(v1, v2) {
if gc.Debug['P'] != 0 {
fmt.Printf("eliminating self-move: %v\n", r0.Prog)
func excludedregs() uint64 {
// Exclude registers with fixed functions
- regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS) | RtoB(ppc64.REGTMP))
+ regbits := 1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS) | RtoB(ppc64.REGTMP)
if gc.Ctxt.Flag_shared != 0 {
// When compiling Go into PIC, R2 is reserved to be the TOC pointer
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
- if osrc < odst && int64(odst) < int64(osrc)+w {
+ if osrc < odst && odst < osrc+w {
// reverse direction
gins(x86.ASTD, nil, nil) // set direction flag
if c > 0 {