"cmd/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/arm"
- "fmt"
)
-/*
- * peep.c
- */
-/*
- * generate:
- * res = n;
- * simplifies and calls gmove.
- */
-func cgen(n *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\ncgen-n", n)
- gc.Dump("cgen-res", res)
- }
-
- if n == nil || n.Type == nil {
- return
- }
-
- if res == nil || res.Type == nil {
- gc.Fatal("cgen: res nil")
- }
-
- switch n.Op {
- case gc.OSLICE,
- gc.OSLICEARR,
- gc.OSLICESTR,
- gc.OSLICE3,
- gc.OSLICE3ARR:
- if res.Op != gc.ONAME || res.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_slice(n, &n1)
- cgen(&n1, res)
- } else {
- gc.Cgen_slice(n, res)
- }
- return
-
- case gc.OEFACE:
- if res.Op != gc.ONAME || res.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_eface(n, &n1)
- cgen(&n1, res)
- } else {
- gc.Cgen_eface(n, res)
- }
- return
- }
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- }
-
- if n.Ullman >= gc.UINF {
- if n.Op == gc.OINDREG {
- gc.Fatal("cgen: this is going to misscompile")
- }
- if res.Ullman >= gc.UINF {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- cgen(n, &n1)
- cgen(&n1, res)
- return
- }
- }
-
- if gc.Isfat(n.Type) {
- if n.Type.Width < 0 {
- gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
- }
- sgen(n, res, n.Type.Width)
- return
- }
-
- // update addressability for string, slice
- // can't do in walk because n->left->addable
- // changes if n->left is an escaping local variable.
- switch n.Op {
- case gc.OSPTR,
- gc.OLEN:
- if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
- n.Addable = n.Left.Addable
- }
-
- case gc.OCAP:
- if gc.Isslice(n.Left.Type) {
- n.Addable = n.Left.Addable
- }
-
- case gc.OITAB:
- n.Addable = n.Left.Addable
- }
-
- // if both are addressable, move
- if n.Addable != 0 && res.Addable != 0 {
- if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] || gc.Iscomplex[res.Type.Etype] {
- gmove(n, res)
- } else {
- var n1 gc.Node
- regalloc(&n1, n.Type, nil)
- gmove(n, &n1)
- cgen(&n1, res)
- regfree(&n1)
- }
-
- return
- }
-
- // if both are not addressable, use a temporary.
- if n.Addable == 0 && res.Addable == 0 {
- // could use regalloc here sometimes,
- // but have to check for ullman >= UINF.
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
-
- cgen(n, &n1)
- cgen(&n1, res)
- return
- }
-
- // if result is not addressable directly but n is,
- // compute its address and then store via the address.
- if res.Addable == 0 {
- var n1 gc.Node
- igen(res, &n1, nil)
- cgen(n, &n1)
- regfree(&n1)
- return
- }
-
- if gc.Complexop(n, res) {
- gc.Complexgen(n, res)
- return
- }
-
- // if n is sudoaddable generate addr and move
- if !gc.Is64(n.Type) && !gc.Is64(res.Type) && !gc.Iscomplex[n.Type.Etype] && !gc.Iscomplex[res.Type.Etype] {
- a := optoas(gc.OAS, n.Type)
- var w int
- var addr obj.Addr
- if sudoaddable(a, n, &addr, &w) {
- if res.Op != gc.OREGISTER {
- var n2 gc.Node
- regalloc(&n2, res.Type, nil)
- p1 := gins(a, nil, &n2)
- p1.From = addr
- if gc.Debug['g'] != 0 {
- fmt.Printf("%v [ignore previous line]\n", p1)
- }
- gmove(&n2, res)
- regfree(&n2)
- } else {
- p1 := gins(a, nil, res)
- p1.From = addr
- if gc.Debug['g'] != 0 {
- fmt.Printf("%v [ignore previous line]\n", p1)
- }
- }
-
- sudoclean()
- return
- }
- }
-
- // otherwise, the result is addressable but n is not.
- // let's do some computation.
-
- nl := n.Left
-
- nr := n.Right
-
- if nl != nil && nl.Ullman >= gc.UINF {
- if nr != nil && nr.Ullman >= gc.UINF {
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
- cgen(nl, &n1)
- n2 := *n
- n2.Left = &n1
- cgen(&n2, res)
- return
- }
- }
-
- // 64-bit ops are hard on 32-bit machine.
- if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) {
- switch n.Op {
- // math goes to cgen64.
- case gc.OMINUS,
- gc.OCOM,
- gc.OADD,
- gc.OSUB,
- gc.OMUL,
- gc.OLROT,
- gc.OLSH,
- gc.ORSH,
- gc.OAND,
- gc.OOR,
- gc.OXOR:
- cgen64(n, res)
-
- return
- }
- }
-
- var a int
- var f0 gc.Node
- var n1 gc.Node
- var n2 gc.Node
- if nl != nil && gc.Isfloat[n.Type.Etype] && gc.Isfloat[nl.Type.Etype] {
- // floating-point.
- regalloc(&f0, nl.Type, res)
-
- if nr != nil {
- goto flt2
- }
-
- if n.Op == gc.OMINUS {
- nr = gc.Nodintconst(-1)
- gc.Convlit(&nr, n.Type)
- n.Op = gc.OMUL
- goto flt2
- }
-
- // unary
- cgen(nl, &f0)
-
- if n.Op != gc.OCONV && n.Op != gc.OPLUS {
- gins(optoas(int(n.Op), n.Type), &f0, &f0)
- }
- gmove(&f0, res)
- regfree(&f0)
- return
- }
- switch n.Op {
- default:
- gc.Dump("cgen", n)
- gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
-
- case gc.OREAL,
- gc.OIMAG,
- gc.OCOMPLEX:
- gc.Fatal("unexpected complex")
-
- // these call bgen to get a bool value
- case gc.OOROR,
- gc.OANDAND,
- gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OLE,
- gc.OGE,
- gc.OGT,
- gc.ONOT:
- p1 := gc.Gbranch(arm.AB, nil, 0)
-
- p2 := gc.Pc
- gmove(gc.Nodbool(true), res)
- p3 := gc.Gbranch(arm.AB, nil, 0)
- gc.Patch(p1, gc.Pc)
- bgen(n, true, 0, p2)
- gmove(gc.Nodbool(false), res)
- gc.Patch(p3, gc.Pc)
- return
-
- case gc.OPLUS:
- cgen(nl, res)
- return
-
- // unary
- case gc.OCOM:
- a := optoas(gc.OXOR, nl.Type)
-
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
- gc.Nodconst(&n2, nl.Type, -1)
- gins(a, &n2, &n1)
- goto norm
-
- case gc.OMINUS:
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
- gc.Nodconst(&n2, nl.Type, 0)
- gins(optoas(gc.OMINUS, nl.Type), &n2, &n1)
- goto norm
-
- // symmetric binary
- case gc.OAND,
- gc.OOR,
- gc.OXOR,
- gc.OADD,
- gc.OMUL:
- a = optoas(int(n.Op), nl.Type)
-
- // symmetric binary
- if nl.Ullman < nr.Ullman {
- r := nl
- nl = nr
- nr = r
- }
- goto abop
-
- // asymmetric binary
- case gc.OSUB:
- a = optoas(int(n.Op), nl.Type)
-
- goto abop
-
- case gc.OHMUL:
- cgen_hmul(nl, nr, res)
-
- case gc.OLROT,
- gc.OLSH,
- gc.ORSH:
- cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
-
- case gc.OCONV:
- if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
- cgen(nl, res)
- break
- }
-
- var n1 gc.Node
- if nl.Addable != 0 && !gc.Is64(nl.Type) {
- regalloc(&n1, nl.Type, res)
- gmove(nl, &n1)
- } else {
- if n.Type.Width > int64(gc.Widthptr) || gc.Is64(nl.Type) || gc.Isfloat[nl.Type.Etype] {
- gc.Tempname(&n1, nl.Type)
- } else {
- regalloc(&n1, nl.Type, res)
- }
- cgen(nl, &n1)
- }
-
- var n2 gc.Node
- if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) || gc.Isfloat[n.Type.Etype] {
- gc.Tempname(&n2, n.Type)
- } else {
- regalloc(&n2, n.Type, nil)
- }
- gmove(&n1, &n2)
- gmove(&n2, res)
- if n1.Op == gc.OREGISTER {
- regfree(&n1)
- }
- if n2.Op == gc.OREGISTER {
- regfree(&n2)
- }
-
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OINDEX,
- gc.OIND,
- gc.ONAME: // PHEAP or PPARAMREF var
- var n1 gc.Node
- igen(n, &n1, res)
-
- gmove(&n1, res)
- regfree(&n1)
-
- // interface table is first word of interface value
- case gc.OITAB:
- var n1 gc.Node
- igen(nl, &n1, res)
-
- n1.Type = n.Type
- gmove(&n1, res)
- regfree(&n1)
-
- // pointer is the first word of string or slice.
- case gc.OSPTR:
- if gc.Isconst(nl, gc.CTSTR) {
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
- p1 := gins(arm.AMOVW, nil, &n1)
- gc.Datastring(nl.Val.U.Sval, &p1.From)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- var n1 gc.Node
- igen(nl, &n1, res)
- n1.Type = n.Type
- gmove(&n1, res)
- regfree(&n1)
-
- case gc.OLEN:
- if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
- // map has len in the first 32-bit word.
- // a zero pointer means zero length
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
-
- cgen(nl, &n1)
-
- var n2 gc.Node
- gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
- gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
-
- n2 = n1
- n2.Op = gc.OINDREG
- n2.Type = gc.Types[gc.TINT32]
- gmove(&n2, &n1)
-
- gc.Patch(p1, gc.Pc)
-
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
- // both slice and string have len one pointer into the struct.
- var n1 gc.Node
- igen(nl, &n1, res)
-
- n1.Type = gc.Types[gc.TUINT32]
- n1.Xoffset += int64(gc.Array_nel)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
-
- case gc.OCAP:
- if gc.Istype(nl.Type, gc.TCHAN) {
- // chan has cap in the second 32-bit word.
- // a zero pointer means zero length
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
-
- cgen(nl, &n1)
-
- var n2 gc.Node
- gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
- gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
-
- n2 = n1
- n2.Op = gc.OINDREG
- n2.Xoffset = 4
- n2.Type = gc.Types[gc.TINT32]
- gmove(&n2, &n1)
-
- gc.Patch(p1, gc.Pc)
-
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- if gc.Isslice(nl.Type) {
- var n1 gc.Node
- igen(nl, &n1, res)
- n1.Type = gc.Types[gc.TUINT32]
- n1.Xoffset += int64(gc.Array_cap)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
-
- case gc.OADDR:
- agen(nl, res)
-
- // Release res so that it is available for cgen_call.
- // Pick it up again after the call.
- case gc.OCALLMETH,
- gc.OCALLFUNC:
- rg := -1
-
- if n.Ullman >= gc.UINF {
- if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
- rg = int(res.Val.U.Reg)
- reg[rg]--
- }
- }
-
- if n.Op == gc.OCALLMETH {
- gc.Cgen_callmeth(n, 0)
- } else {
- cgen_call(n, 0)
- }
- if rg >= 0 {
- reg[rg]++
- }
- cgen_callret(n, res)
-
- case gc.OCALLINTER:
- cgen_callinter(n, res, 0)
- cgen_callret(n, res)
-
- case gc.OMOD,
- gc.ODIV:
- a = optoas(int(n.Op), nl.Type)
- goto abop
- }
-
- return
-
- // TODO(kaib): use fewer registers here.
-abop: // asymmetric binary
- if nl.Ullman >= nr.Ullman {
- regalloc(&n1, nl.Type, res)
- cgen(nl, &n1)
- switch n.Op {
- case gc.OADD,
- gc.OSUB,
- gc.OAND,
- gc.OOR,
- gc.OXOR:
- if gc.Smallintconst(nr) {
- n2 = *nr
- break
- }
- fallthrough
-
- default:
- regalloc(&n2, nr.Type, nil)
- cgen(nr, &n2)
- }
- } else {
- switch n.Op {
- case gc.OADD,
- gc.OSUB,
- gc.OAND,
- gc.OOR,
- gc.OXOR:
- if gc.Smallintconst(nr) {
- n2 = *nr
- break
- }
- fallthrough
-
- default:
- regalloc(&n2, nr.Type, res)
- cgen(nr, &n2)
- }
-
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
- }
-
- gins(a, &n2, &n1)
-
- // Normalize result for types smaller than word.
-norm:
- if n.Type.Width < int64(gc.Widthptr) {
- switch n.Op {
- case gc.OADD,
- gc.OSUB,
- gc.OMUL,
- gc.OCOM,
- gc.OMINUS:
- gins(optoas(gc.OAS, n.Type), &n1, &n1)
- }
- }
-
- gmove(&n1, res)
- regfree(&n1)
- if n2.Op != gc.OLITERAL {
- regfree(&n2)
- }
- return
-
-flt2: // binary
- var f1 gc.Node
- if nl.Ullman >= nr.Ullman {
- cgen(nl, &f0)
- regalloc(&f1, n.Type, nil)
- gmove(&f0, &f1)
- cgen(nr, &f0)
- gins(optoas(int(n.Op), n.Type), &f0, &f1)
- } else {
- cgen(nr, &f0)
- regalloc(&f1, n.Type, nil)
- cgen(nl, &f1)
- gins(optoas(int(n.Op), n.Type), &f0, &f1)
- }
-
- gmove(&f1, res)
- regfree(&f0)
- regfree(&f1)
- return
-}
-
/*
* generate array index into res.
* n might be any size; res is 32-bit.
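+ * For a 64-bit n, the returned branch is taken when the index does not
+ * fit in 32 bits and must be patched to the out-of-range path; nil is
+ * returned when no check is needed.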
*/
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
if !gc.Is64(n.Type) {
- cgen(n, res)
+ gc.Cgen(n, res)
return nil
}
var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT64])
- cgen(n, &tmp)
+ gc.Cgen(n, &tmp)
var lo gc.Node
var hi gc.Node
split64(&tmp, &lo, &hi)
}
var n1 gc.Node
- regalloc(&n1, gc.Types[gc.TINT32], nil)
+ gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
var n2 gc.Node
- regalloc(&n2, gc.Types[gc.TINT32], nil)
+ gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
var zero gc.Node
gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
gmove(&hi, &n1)
gmove(&zero, &n2)
- gcmp(arm.ACMP, &n1, &n2)
- regfree(&n2)
- regfree(&n1)
+ gins(arm.ACMP, &n1, &n2)
+ gc.Regfree(&n2)
+ gc.Regfree(&n1)
splitclean()
return gc.Gbranch(arm.ABNE, nil, -1)
}
-/*
- * generate:
- * res = &n;
- * The generated code checks that the result is not nil.
- */
-func agen(n *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nagen-res", res)
- gc.Dump("agen-r", n)
- }
-
- if n == nil || n.Type == nil || res == nil || res.Type == nil {
- gc.Fatal("agen")
- }
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- }
-
- if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
- // Use of a nil interface or nil slice.
- // Create a temporary we can take the address of and read.
- // The generated code is just going to panic, so it need not
- // be terribly efficient. See issue 3670.
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
-
- gc.Gvardef(&n1)
- clearfat(&n1)
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], res)
- gins(arm.AMOVW, &n1, &n2)
- gmove(&n2, res)
- regfree(&n2)
- return
- }
-
- if n.Addable != 0 {
- var n1 gc.Node
- n1.Op = gc.OADDR
- n1.Left = n
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], res)
- gins(arm.AMOVW, &n1, &n2)
- gmove(&n2, res)
- regfree(&n2)
- return
- }
-
- nl := n.Left
-
- switch n.Op {
- default:
- gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
-
- // Release res so that it is available for cgen_call.
- // Pick it up again after the call.
- case gc.OCALLMETH,
- gc.OCALLFUNC:
- r := -1
-
- if n.Ullman >= gc.UINF {
- if res.Op == gc.OREGISTER || res.Op == gc.OINDREG {
- r = int(res.Val.U.Reg)
- reg[r]--
- }
- }
-
- if n.Op == gc.OCALLMETH {
- gc.Cgen_callmeth(n, 0)
- } else {
- cgen_call(n, 0)
- }
- if r >= 0 {
- reg[r]++
- }
- cgen_aret(n, res)
-
- case gc.OCALLINTER:
- cgen_callinter(n, res, 0)
- cgen_aret(n, res)
-
- case gc.OSLICE,
- gc.OSLICEARR,
- gc.OSLICESTR,
- gc.OSLICE3,
- gc.OSLICE3ARR:
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_slice(n, &n1)
- agen(&n1, res)
-
- case gc.OEFACE:
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_eface(n, &n1)
- agen(&n1, res)
-
- case gc.OINDEX:
- var n1 gc.Node
- agenr(n, &n1, res)
- gmove(&n1, res)
- regfree(&n1)
-
- // should only get here with names in this func.
- case gc.ONAME:
- if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
- gc.Dump("bad agen", n)
- gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
- }
-
- // should only get here for heap vars or paramref
- if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
- gc.Dump("bad agen", n)
- gc.Fatal("agen: bad ONAME class %#x", n.Class)
- }
-
- cgen(n.Heapaddr, res)
- if n.Xoffset != 0 {
- var n1 gc.Node
- gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
- var n2 gc.Node
- regalloc(&n2, n1.Type, nil)
- var n3 gc.Node
- regalloc(&n3, gc.Types[gc.TINT32], nil)
- gmove(&n1, &n2)
- gmove(res, &n3)
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
- gmove(&n3, res)
- regfree(&n2)
- regfree(&n3)
- }
-
- case gc.OIND:
- cgen(nl, res)
- gc.Cgen_checknil(res)
-
- case gc.ODOT:
- agen(nl, res)
- if n.Xoffset != 0 {
- var n1 gc.Node
- gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
- var n2 gc.Node
- regalloc(&n2, n1.Type, nil)
- var n3 gc.Node
- regalloc(&n3, gc.Types[gc.TINT32], nil)
- gmove(&n1, &n2)
- gmove(res, &n3)
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
- gmove(&n3, res)
- regfree(&n2)
- regfree(&n3)
- }
-
- case gc.ODOTPTR:
- cgen(nl, res)
- gc.Cgen_checknil(res)
- if n.Xoffset != 0 {
- var n1 gc.Node
- gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
- var n2 gc.Node
- regalloc(&n2, n1.Type, nil)
- var n3 gc.Node
- regalloc(&n3, gc.Types[gc.Tptr], nil)
- gmove(&n1, &n2)
- gmove(res, &n3)
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
- gmove(&n3, res)
- regfree(&n2)
- regfree(&n3)
- }
- }
-}
-
-/*
- * generate:
- * newreg = &n;
- * res = newreg
- *
- * on exit, a has been changed to be *newreg.
- * caller must regfree(a).
- * The generated code checks that the result is not *nil.
- */
-func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nigen-n", n)
- }
-
- switch n.Op {
- case gc.ONAME:
- if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
- break
- }
- *a = *n
- return
-
- // Increase the refcount of the register so that igen's caller
- // has to call regfree.
- case gc.OINDREG:
- if n.Val.U.Reg != arm.REGSP {
- reg[n.Val.U.Reg]++
- }
- *a = *n
- return
-
- case gc.ODOT:
- igen(n.Left, a, res)
- a.Xoffset += n.Xoffset
- a.Type = n.Type
- return
-
- case gc.ODOTPTR:
- if n.Left.Addable != 0 || n.Left.Op == gc.OCALLFUNC || n.Left.Op == gc.OCALLMETH || n.Left.Op == gc.OCALLINTER {
- // igen-able nodes.
- var n1 gc.Node
- igen(n.Left, &n1, res)
-
- regalloc(a, gc.Types[gc.Tptr], &n1)
- gmove(&n1, a)
- regfree(&n1)
- } else {
- regalloc(a, gc.Types[gc.Tptr], res)
- cgen(n.Left, a)
- }
-
- gc.Cgen_checknil(a)
- a.Op = gc.OINDREG
- a.Xoffset = n.Xoffset
- a.Type = n.Type
- return
-
- // Release res so that it is available for cgen_call.
- // Pick it up again after the call.
- case gc.OCALLMETH,
- gc.OCALLFUNC,
- gc.OCALLINTER:
- r := -1
-
- if n.Ullman >= gc.UINF {
- if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
- r = int(res.Val.U.Reg)
- reg[r]--
- }
- }
-
- switch n.Op {
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
-
- case gc.OCALLFUNC:
- cgen_call(n, 0)
-
- case gc.OCALLINTER:
- cgen_callinter(n, nil, 0)
- }
-
- if r >= 0 {
- reg[r]++
- }
- regalloc(a, gc.Types[gc.Tptr], res)
- cgen_aret(n, a)
- a.Op = gc.OINDREG
- a.Type = n.Type
- return
- }
-
- agenr(n, a, res)
- a.Op = gc.OINDREG
- a.Type = n.Type
-}
-
-/*
- * allocate a register in res and generate
- * newreg = &n
- * The caller must call regfree(a).
- */
-func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("cgenr-n", n)
- }
-
- if gc.Isfat(n.Type) {
- gc.Fatal("cgenr on fat node")
- }
-
- if n.Addable != 0 {
- regalloc(a, gc.Types[gc.Tptr], res)
- gmove(n, a)
- return
- }
-
- switch n.Op {
- case gc.ONAME,
- gc.ODOT,
- gc.ODOTPTR,
- gc.OINDEX,
- gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- var n1 gc.Node
- igen(n, &n1, res)
- regalloc(a, gc.Types[gc.Tptr], &n1)
- gmove(&n1, a)
- regfree(&n1)
-
- default:
- regalloc(a, n.Type, res)
- cgen(n, a)
- }
-}
-
-/*
- * generate:
- * newreg = &n;
- *
- * caller must regfree(a).
- * The generated code checks that the result is not nil.
- */
-func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("agenr-n", n)
- }
-
- nl := n.Left
- nr := n.Right
-
- switch n.Op {
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- var n1 gc.Node
- igen(n, &n1, res)
- regalloc(a, gc.Types[gc.Tptr], &n1)
- agen(&n1, a)
- regfree(&n1)
-
- case gc.OIND:
- cgenr(n.Left, a, res)
- gc.Cgen_checknil(a)
-
- case gc.OINDEX:
- var p2 *obj.Prog // to be patched to panicindex.
- w := uint32(n.Type.Width)
- bounded := gc.Debug['B'] != 0 || n.Bounded
- var n1 gc.Node
- var n3 gc.Node
- if nr.Addable != 0 {
- var tmp gc.Node
- if !gc.Isconst(nr, gc.CTINT) {
- gc.Tempname(&tmp, gc.Types[gc.TINT32])
- }
- if !gc.Isconst(nl, gc.CTSTR) {
- agenr(nl, &n3, res)
- }
- if !gc.Isconst(nr, gc.CTINT) {
- p2 = cgenindex(nr, &tmp, bounded)
- regalloc(&n1, tmp.Type, nil)
- gmove(&tmp, &n1)
- }
- } else if nl.Addable != 0 {
- if !gc.Isconst(nr, gc.CTINT) {
- var tmp gc.Node
- gc.Tempname(&tmp, gc.Types[gc.TINT32])
- p2 = cgenindex(nr, &tmp, bounded)
- regalloc(&n1, tmp.Type, nil)
- gmove(&tmp, &n1)
- }
-
- if !gc.Isconst(nl, gc.CTSTR) {
- agenr(nl, &n3, res)
- }
- } else {
- var tmp gc.Node
- gc.Tempname(&tmp, gc.Types[gc.TINT32])
- p2 = cgenindex(nr, &tmp, bounded)
- nr = &tmp
- if !gc.Isconst(nl, gc.CTSTR) {
- agenr(nl, &n3, res)
- }
- regalloc(&n1, tmp.Type, nil)
- gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
- }
-
- // &a is in &n3 (allocated in res)
- // i is in &n1 (if not constant)
- // w is width
-
- // constant index
- if gc.Isconst(nr, gc.CTINT) {
- if gc.Isconst(nl, gc.CTSTR) {
- gc.Fatal("constant string constant index")
- }
- v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
- var n2 gc.Node
- if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- if gc.Debug['B'] == 0 && !n.Bounded {
- n1 = n3
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_nel)
- var n4 gc.Node
- regalloc(&n4, n1.Type, nil)
- gmove(&n1, &n4)
- gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
- gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n4, &n2)
- regfree(&n4)
- p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
- ginscall(gc.Panicindex, 0)
- gc.Patch(p1, gc.Pc)
- }
-
- n1 = n3
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_array)
- gmove(&n1, &n3)
- }
-
- gc.Nodconst(&n2, gc.Types[gc.Tptr], int64(v*uint64(w)))
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
- *a = n3
- break
- }
-
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.TINT32], &n1) // i
- gmove(&n1, &n2)
- regfree(&n1)
-
- var n4 gc.Node
- if gc.Debug['B'] == 0 && !n.Bounded {
- // check bounds
- if gc.Isconst(nl, gc.CTSTR) {
- gc.Nodconst(&n4, gc.Types[gc.TUINT32], int64(len(nl.Val.U.Sval)))
- } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- n1 = n3
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_nel)
- regalloc(&n4, gc.Types[gc.TUINT32], nil)
- gmove(&n1, &n4)
- } else {
- gc.Nodconst(&n4, gc.Types[gc.TUINT32], nl.Type.Bound)
- }
-
- gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n2, &n4)
- if n4.Op == gc.OREGISTER {
- regfree(&n4)
- }
- p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
- if p2 != nil {
- gc.Patch(p2, gc.Pc)
- }
- ginscall(gc.Panicindex, 0)
- gc.Patch(p1, gc.Pc)
- }
-
- if gc.Isconst(nl, gc.CTSTR) {
- regalloc(&n3, gc.Types[gc.Tptr], res)
- p1 := gins(arm.AMOVW, nil, &n3)
- gc.Datastring(nl.Val.U.Sval, &p1.From)
- p1.From.Type = obj.TYPE_ADDR
- } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- n1 = n3
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_array)
- gmove(&n1, &n3)
- }
-
- if w == 0 {
- } else // nothing to do
- if w == 1 || w == 2 || w == 4 || w == 8 {
- n4 = gc.Node{}
- n4.Op = gc.OADDR
- n4.Left = &n2
- cgen(&n4, &n3)
- if w == 1 {
- gins(arm.AADD, &n2, &n3)
- } else if w == 2 {
- gshift(arm.AADD, &n2, arm.SHIFT_LL, 1, &n3)
- } else if w == 4 {
- gshift(arm.AADD, &n2, arm.SHIFT_LL, 2, &n3)
- } else if w == 8 {
- gshift(arm.AADD, &n2, arm.SHIFT_LL, 3, &n3)
- }
- } else {
- regalloc(&n4, gc.Types[gc.TUINT32], nil)
- gc.Nodconst(&n1, gc.Types[gc.TUINT32], int64(w))
- gmove(&n1, &n4)
- gins(optoas(gc.OMUL, gc.Types[gc.TUINT32]), &n4, &n2)
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
- regfree(&n4)
- }
-
- *a = n3
- regfree(&n2)
-
- default:
- regalloc(a, gc.Types[gc.Tptr], res)
- agen(n, a)
- }
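+// igenindex evaluates the index expression n into a fresh stack
+// temporary res, then returns cgenindex's range-check branch (if any).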
+func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
+ gc.Tempname(res, n.Type)
+ return cgenindex(n, res, bounded)
}
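+// gencmp0 compares n against zero of type t and emits a branch on
+// condition o (with likeliness hint likely) to to.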
func gencmp0(n *gc.Node, t *gc.Type, o int, likely int, to *obj.Prog) {
var n1 gc.Node
- regalloc(&n1, t, nil)
- cgen(n, &n1)
+ gc.Regalloc(&n1, t, nil)
+ gc.Cgen(n, &n1)
a := optoas(gc.OCMP, t)
if a != arm.ACMP {
var n2 gc.Node
gc.Nodconst(&n2, t, 0)
var n3 gc.Node
- regalloc(&n3, t, nil)
+ gc.Regalloc(&n3, t, nil)
gmove(&n2, &n3)
- gcmp(a, &n1, &n3)
- regfree(&n3)
+ gins(a, &n1, &n3)
+ gc.Regfree(&n3)
} else {
gins(arm.ATST, &n1, nil)
}
a = optoas(o, t)
gc.Patch(gc.Gbranch(a, t, likely), to)
- regfree(&n1)
-}
-
-/*
- * generate:
- * if(n == true) goto to;
- */
-func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nbgen", n)
- }
-
- if n == nil {
- n = gc.Nodbool(true)
- }
-
- if n.Ninit != nil {
- gc.Genlist(n.Ninit)
- }
-
- if n.Type == nil {
- gc.Convlit(&n, gc.Types[gc.TBOOL])
- if n.Type == nil {
- return
- }
- }
-
- et := int(n.Type.Etype)
- if et != gc.TBOOL {
- gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
- gc.Patch(gins(obj.AEND, nil, nil), to)
- return
- }
-
- var nr *gc.Node
-
- var nl *gc.Node
- switch n.Op {
- default:
- a := gc.ONE
- if !true_ {
- a = gc.OEQ
- }
- gencmp0(n, n.Type, a, likely, to)
- return
-
- // need to ask if it is bool?
- case gc.OLITERAL:
- if !true_ == (n.Val.U.Bval == 0) {
- gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
- }
- return
-
- case gc.OANDAND,
- gc.OOROR:
- if (n.Op == gc.OANDAND) == true_ {
- p1 := gc.Gbranch(obj.AJMP, nil, 0)
- p2 := gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, gc.Pc)
- bgen(n.Left, !true_, -likely, p2)
- bgen(n.Right, !true_, -likely, p2)
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, to)
- gc.Patch(p2, gc.Pc)
- } else {
- bgen(n.Left, true_, likely, to)
- bgen(n.Right, true_, likely, to)
- }
-
- return
-
- case gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OGT,
- gc.OLE,
- gc.OGE:
- nr = n.Right
- if nr == nil || nr.Type == nil {
- return
- }
- fallthrough
-
- case gc.ONOT: // unary
- nl = n.Left
-
- if nl == nil || nl.Type == nil {
- return
- }
- }
-
- switch n.Op {
- case gc.ONOT:
- bgen(nl, !true_, likely, to)
- return
-
- case gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OGT,
- gc.OLE,
- gc.OGE:
- a := int(n.Op)
- if !true_ {
- if gc.Isfloat[nl.Type.Etype] {
- // brcom is not valid on floats when NaN is involved.
- p1 := gc.Gbranch(arm.AB, nil, 0)
-
- p2 := gc.Gbranch(arm.AB, nil, 0)
- gc.Patch(p1, gc.Pc)
- ll := n.Ninit
- n.Ninit = nil
- bgen(n, true, -likely, p2)
- n.Ninit = ll
- gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
- gc.Patch(p2, gc.Pc)
- return
- }
-
- a = gc.Brcom(a)
- true_ = !true_
- }
-
- // make simplest on right
- if nl.Op == gc.OLITERAL || (nl.Ullman < gc.UINF && nl.Ullman < nr.Ullman) {
- a = gc.Brrev(a)
- r := nl
- nl = nr
- nr = r
- }
-
- if gc.Isslice(nl.Type) {
- // only valid to cmp darray to literal nil
- if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
- gc.Yyerror("illegal array comparison")
- break
- }
-
- var n1 gc.Node
- igen(nl, &n1, nil)
- n1.Xoffset += int64(gc.Array_array)
- n1.Type = gc.Types[gc.Tptr]
- gencmp0(&n1, gc.Types[gc.Tptr], a, likely, to)
- regfree(&n1)
- break
- }
-
- if gc.Isinter(nl.Type) {
- // front end should only leave cmp to literal nil
- if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
- gc.Yyerror("illegal interface comparison")
- break
- }
-
- var n1 gc.Node
- igen(nl, &n1, nil)
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset += 0
- gencmp0(&n1, gc.Types[gc.Tptr], a, likely, to)
- regfree(&n1)
- break
- }
-
- if gc.Iscomplex[nl.Type.Etype] {
- gc.Complexbool(a, nl, nr, true_, likely, to)
- break
- }
-
- if gc.Is64(nr.Type) {
- if nl.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
- cgen(nl, &n1)
- nl = &n1
- }
-
- if nr.Addable == 0 {
- var n2 gc.Node
- gc.Tempname(&n2, nr.Type)
- cgen(nr, &n2)
- nr = &n2
- }
-
- cmp64(nl, nr, a, likely, to)
- break
- }
-
- if nr.Op == gc.OLITERAL {
- if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) == 0 {
- gencmp0(nl, nl.Type, a, likely, to)
- break
- }
-
- if nr.Val.Ctype == gc.CTNIL {
- gencmp0(nl, nl.Type, a, likely, to)
- break
- }
- }
-
- a = optoas(a, nr.Type)
-
- if nr.Ullman >= gc.UINF {
- var n1 gc.Node
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
-
- var tmp gc.Node
- gc.Tempname(&tmp, nl.Type)
- gmove(&n1, &tmp)
- regfree(&n1)
-
- var n2 gc.Node
- regalloc(&n2, nr.Type, nil)
- cgen(nr, &n2)
-
- regalloc(&n1, nl.Type, nil)
- cgen(&tmp, &n1)
-
- gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2)
- gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
-
- regfree(&n1)
- regfree(&n2)
- break
- }
-
- var n3 gc.Node
- gc.Tempname(&n3, nl.Type)
- cgen(nl, &n3)
-
- var tmp gc.Node
- gc.Tempname(&tmp, nr.Type)
- cgen(nr, &tmp)
-
- var n1 gc.Node
- regalloc(&n1, nl.Type, nil)
- gmove(&n3, &n1)
-
- var n2 gc.Node
- regalloc(&n2, nr.Type, nil)
- gmove(&tmp, &n2)
-
- gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2)
- if gc.Isfloat[nl.Type.Etype] {
- if n.Op == gc.ONE {
- p1 := gc.Gbranch(arm.ABVS, nr.Type, likely)
- gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
- gc.Patch(p1, to)
- } else {
- p1 := gc.Gbranch(arm.ABVS, nr.Type, -likely)
- gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
- gc.Patch(p1, gc.Pc)
- }
- } else {
- gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
- }
-
- regfree(&n1)
- regfree(&n2)
- }
-
- return
-}
-
-/*
- * n is on stack, either local variable
- * or return value from function call.
- * return n's offset from SP.
- */
-func stkof(n *gc.Node) int32 {
- switch n.Op {
- case gc.OINDREG:
- return int32(n.Xoffset)
-
- case gc.ODOT:
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- break
- }
- off := stkof(n.Left)
- if off == -1000 || off == 1000 {
- return off
- }
- return int32(int64(off) + n.Xoffset)
-
- case gc.OINDEX:
- t := n.Left.Type
- if !gc.Isfixedarray(t) {
- break
- }
- off := stkof(n.Left)
- if off == -1000 || off == 1000 {
- return off
- }
- if gc.Isconst(n.Right, gc.CTINT) {
- return int32(int64(off) + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval))
- }
- return 1000
-
- case gc.OCALLMETH,
- gc.OCALLINTER,
- gc.OCALLFUNC:
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- t = t.Type
- }
-
- var flist gc.Iter
- t = gc.Structfirst(&flist, gc.Getoutarg(t))
- if t != nil {
- return int32(t.Width + 4) // correct for LR
- }
- }
-
- // botch - probably failing to recognize address
- // arithmetic on the above. eg INDEX and DOT
- return -1000
+ gc.Regfree(&n1)
}
-/*
- * block copy:
- * memmove(&res, &n, w);
- * NB: character copy assumed little endian architecture
- */
-func sgen(n *gc.Node, res *gc.Node, w int64) {
- if gc.Debug['g'] != 0 {
- fmt.Printf("\nsgen w=%d\n", w)
- gc.Dump("r", n)
- gc.Dump("res", res)
- }
-
- if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
- gc.Fatal("sgen UINF")
- }
-
- if w < 0 || int64(int32(w)) != w {
- gc.Fatal("sgen copy %d", w)
- }
-
- if n.Type == nil {
- gc.Fatal("sgen: missing type")
- }
-
- if w == 0 {
- // evaluate side effects only.
- var dst gc.Node
- regalloc(&dst, gc.Types[gc.Tptr], nil)
-
- agen(res, &dst)
- agen(n, &dst)
- regfree(&dst)
- return
- }
-
- // If copying .args, that's all the results, so record definition sites
- // for them for the liveness analysis.
- if res.Op == gc.ONAME && res.Sym.Name == ".args" {
- for l := gc.Curfn.Dcl; l != nil; l = l.Next {
- if l.N.Class == gc.PPARAMOUT {
- gc.Gvardef(l.N)
- }
- }
- }
-
- // Avoid taking the address for simple enough types.
- if gc.Componentgen(n, res) {
- return
- }
-
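+/*
+ * block copy:
+ *	memmove(&res, &n, w);
+ * osrc and odst are the stack offsets of n and res, used to check
+ * alignment and to choose a copy direction when the regions overlap.
+ * NB: character copy assumed little endian architecture
+ */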
+func stackcopy(n, res *gc.Node, osrc, odst, w int64) {
// determine alignment.
// want to avoid unaligned access, so have to use
// smaller operations for less aligned types.
}
c := int32(w / int64(align))
- // offset on the stack
- osrc := stkof(n)
-
- odst := stkof(res)
- if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
- // osrc and odst both on stack, and at least one is in
- // an unknown position. Could generate code to test
- // for forward/backward copy, but instead just copy
- // to a temporary location first.
- var tmp gc.Node
- gc.Tempname(&tmp, n.Type)
-
- sgen(n, &tmp, w)
- sgen(&tmp, res, w)
- return
- }
-
- if osrc%int32(align) != 0 || odst%int32(align) != 0 {
+ if osrc%int64(align) != 0 || odst%int64(align) != 0 {
gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
}
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
dir := align
-
if osrc < odst && int64(odst) < int64(osrc)+w {
dir = -dir
}
if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
var r0 gc.Node
r0.Op = gc.OREGISTER
- r0.Val.U.Reg = REGALLOC_R0
+ r0.Val.U.Reg = arm.REG_R0
var r1 gc.Node
r1.Op = gc.OREGISTER
- r1.Val.U.Reg = REGALLOC_R0 + 1
+ r1.Val.U.Reg = arm.REG_R0 + 1
var r2 gc.Node
r2.Op = gc.OREGISTER
- r2.Val.U.Reg = REGALLOC_R0 + 2
+ r2.Val.U.Reg = arm.REG_R0 + 2
var src gc.Node
- regalloc(&src, gc.Types[gc.Tptr], &r1)
+ gc.Regalloc(&src, gc.Types[gc.Tptr], &r1)
var dst gc.Node
- regalloc(&dst, gc.Types[gc.Tptr], &r2)
+ gc.Regalloc(&dst, gc.Types[gc.Tptr], &r2)
if n.Ullman >= res.Ullman {
// eval n first
- agen(n, &src)
+ gc.Agen(n, &src)
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
- agen(res, &dst)
+ gc.Agen(res, &dst)
} else {
// eval res first
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
- agen(res, &dst)
- agen(n, &src)
+ gc.Agen(res, &dst)
+ gc.Agen(n, &src)
}
var tmp gc.Node
- regalloc(&tmp, gc.Types[gc.Tptr], &r0)
+ gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0)
f := gc.Sysfunc("duffcopy")
p := gins(obj.ADUFFCOPY, nil, f)
gc.Afunclit(&p.To, f)
// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
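+ // Each word copied costs two 4-byte instructions in duffcopy, which
+ // has 128 such units; entering at offset 8*(128-c) copies exactly c words.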
p.To.Offset = 8 * (128 - int64(c))
- regfree(&tmp)
- regfree(&src)
- regfree(&dst)
+ gc.Regfree(&tmp)
+ gc.Regfree(&src)
+ gc.Regfree(&dst)
return
}
var dst gc.Node
var src gc.Node
if n.Ullman >= res.Ullman {
- agenr(n, &dst, res) // temporarily use dst
- regalloc(&src, gc.Types[gc.Tptr], nil)
+ gc.Agenr(n, &dst, res) // temporarily use dst
+ gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
gins(arm.AMOVW, &dst, &src)
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
- agen(res, &dst)
+ gc.Agen(res, &dst)
} else {
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
- agenr(res, &dst, res)
- agenr(n, &src, nil)
+ gc.Agenr(res, &dst, res)
+ gc.Agenr(n, &src, nil)
}
var tmp gc.Node
- regalloc(&tmp, gc.Types[gc.TUINT32], nil)
+ gc.Regalloc(&tmp, gc.Types[gc.TUINT32], nil)
// set up end marker
var nend gc.Node
if c >= 4 {
- regalloc(&nend, gc.Types[gc.TUINT32], nil)
+ gc.Regalloc(&nend, gc.Types[gc.TUINT32], nil)
p := gins(arm.AMOVW, &src, &nend)
p.From.Type = obj.TYPE_ADDR
raddr(&nend, p)
gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
- regfree(&nend)
+ gc.Regfree(&nend)
} else {
var p *obj.Prog
for {
}
}
- regfree(&dst)
- regfree(&src)
- regfree(&tmp)
+ gc.Regfree(&dst)
+ gc.Regfree(&src)
+ gc.Regfree(&tmp)
}
var t1 gc.Node
if l.Addable == 0 {
gc.Tempname(&t1, l.Type)
- cgen(l, &t1)
+ gc.Cgen(l, &t1)
l = &t1
}
var hi2 gc.Node
split64(res, &lo2, &hi2)
- regalloc(&t1, lo1.Type, nil)
+ gc.Regalloc(&t1, lo1.Type, nil)
var al gc.Node
- regalloc(&al, lo1.Type, nil)
+ gc.Regalloc(&al, lo1.Type, nil)
var ah gc.Node
- regalloc(&ah, hi1.Type, nil)
+ gc.Regalloc(&ah, hi1.Type, nil)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi1, &ah)
gins(arm.ASBC, &ah, &t1)
gins(arm.AMOVW, &t1, &hi2)
- regfree(&t1)
- regfree(&al)
- regfree(&ah)
+ gc.Regfree(&t1)
+ gc.Regfree(&al)
+ gc.Regfree(&ah)
splitclean()
splitclean()
return
case gc.OCOM:
- regalloc(&t1, lo1.Type, nil)
+ gc.Regalloc(&t1, lo1.Type, nil)
gmove(ncon(^uint32(0)), &t1)
var lo2 gc.Node
var hi2 gc.Node
split64(res, &lo2, &hi2)
var n1 gc.Node
- regalloc(&n1, lo1.Type, nil)
+ gc.Regalloc(&n1, lo1.Type, nil)
gins(arm.AMOVW, &lo1, &n1)
gins(arm.AEOR, &t1, &n1)
gins(arm.AEOR, &t1, &n1)
gins(arm.AMOVW, &n1, &hi2)
- regfree(&t1)
- regfree(&n1)
+ gc.Regfree(&t1)
+ gc.Regfree(&n1)
splitclean()
splitclean()
return
if r != nil && r.Addable == 0 {
var t2 gc.Node
gc.Tempname(&t2, r.Type)
- cgen(r, &t2)
+ gc.Cgen(r, &t2)
r = &t2
}
}
var al gc.Node
- regalloc(&al, lo1.Type, nil)
+ gc.Regalloc(&al, lo1.Type, nil)
var ah gc.Node
- regalloc(&ah, hi1.Type, nil)
+ gc.Regalloc(&ah, hi1.Type, nil)
// Do op. Leave result in ah:al.
switch n.Op {
// TODO: Constants
case gc.OADD:
var bl gc.Node
- regalloc(&bl, gc.Types[gc.TPTR32], nil)
+ gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)
var bh gc.Node
- regalloc(&bh, gc.Types[gc.TPTR32], nil)
+ gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
gins(arm.AMOVW, &hi1, &ah)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi2, &bh)
p1 := gins(arm.AADD, &bl, &al)
p1.Scond |= arm.C_SBIT
gins(arm.AADC, &bh, &ah)
- regfree(&bl)
- regfree(&bh)
+ gc.Regfree(&bl)
+ gc.Regfree(&bh)
// TODO: Constants.
case gc.OSUB:
var bl gc.Node
- regalloc(&bl, gc.Types[gc.TPTR32], nil)
+ gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)
var bh gc.Node
- regalloc(&bh, gc.Types[gc.TPTR32], nil)
+ gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi1, &ah)
gins(arm.AMOVW, &lo2, &bl)
p1 := gins(arm.ASUB, &bl, &al)
p1.Scond |= arm.C_SBIT
gins(arm.ASBC, &bh, &ah)
- regfree(&bl)
- regfree(&bh)
+ gc.Regfree(&bl)
+ gc.Regfree(&bh)
// TODO(kaib): this can be done with 4 regs and does not need 6
case gc.OMUL:
var bl gc.Node
- regalloc(&bl, gc.Types[gc.TPTR32], nil)
+ gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)
var bh gc.Node
- regalloc(&bh, gc.Types[gc.TPTR32], nil)
+ gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
var cl gc.Node
- regalloc(&cl, gc.Types[gc.TPTR32], nil)
+ gc.Regalloc(&cl, gc.Types[gc.TPTR32], nil)
var ch gc.Node
- regalloc(&ch, gc.Types[gc.TPTR32], nil)
+ gc.Regalloc(&ch, gc.Types[gc.TPTR32], nil)
// load args into bh:bl and bh:bl.
gins(arm.AMOVW, &hi1, &bh)
//print("%P\n", p1);
- regfree(&bh)
+ gc.Regfree(&bh)
- regfree(&bl)
- regfree(&ch)
- regfree(&cl)
+ gc.Regfree(&bl)
+ gc.Regfree(&ch)
+ gc.Regfree(&cl)
// We only rotate by a constant c in [0,64).
// if c >= 32:
v := uint64(gc.Mpgetfix(r.Val.U.Xval))
var bl gc.Node
- regalloc(&bl, lo1.Type, nil)
+ gc.Regalloc(&bl, lo1.Type, nil)
var bh gc.Node
- regalloc(&bh, hi1.Type, nil)
+ gc.Regalloc(&bh, hi1.Type, nil)
if v >= 32 {
// reverse during load to do the first 32 bits of rotate
v -= 32
gshift(arm.AORR, &bh, arm.SHIFT_LR, int32(32-v), &al)
}
- regfree(&bl)
- regfree(&bh)
+ gc.Regfree(&bl)
+ gc.Regfree(&bh)
case gc.OLSH:
var bl gc.Node
- regalloc(&bl, lo1.Type, nil)
+ gc.Regalloc(&bl, lo1.Type, nil)
var bh gc.Node
- regalloc(&bh, hi1.Type, nil)
+ gc.Regalloc(&bh, hi1.Type, nil)
gins(arm.AMOVW, &hi1, &bh)
gins(arm.AMOVW, &lo1, &bl)
goto olsh_break
}
- regalloc(&s, gc.Types[gc.TUINT32], nil)
- regalloc(&creg, gc.Types[gc.TUINT32], nil)
+ gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
+ gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
if gc.Is64(r.Type) {
// shift is >= 1<<32
var cl gc.Node
gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
gmove(&n1, &creg)
- gcmp(arm.ACMP, &s, &creg)
+ gins(arm.ACMP, &s, &creg)
// MOVW.LO bl<<s, al
p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &al)
gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)
gmove(&n1, &creg)
- gcmp(arm.ACMP, &s, &creg)
+ gins(arm.ACMP, &s, &creg)
// EOR.LO al, al
p1 = gins(arm.AEOR, &al, &al)
gc.Patch(p3, gc.Pc)
gc.Patch(p4, gc.Pc)
gc.Patch(p5, gc.Pc)
- regfree(&s)
- regfree(&creg)
+ gc.Regfree(&s)
+ gc.Regfree(&creg)
olsh_break:
- regfree(&bl)
- regfree(&bh)
+ gc.Regfree(&bl)
+ gc.Regfree(&bh)
case gc.ORSH:
var bl gc.Node
- regalloc(&bl, lo1.Type, nil)
+ gc.Regalloc(&bl, lo1.Type, nil)
var bh gc.Node
- regalloc(&bh, hi1.Type, nil)
+ gc.Regalloc(&bh, hi1.Type, nil)
gins(arm.AMOVW, &hi1, &bh)
gins(arm.AMOVW, &lo1, &bl)
goto orsh_break
}
- regalloc(&s, gc.Types[gc.TUINT32], nil)
- regalloc(&creg, gc.Types[gc.TUINT32], nil)
+ gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
+ gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
if gc.Is64(r.Type) {
// shift is >= 1<<32
var ch gc.Node
gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
gmove(&n1, &creg)
- gcmp(arm.ACMP, &s, &creg)
+ gins(arm.ACMP, &s, &creg)
// MOVW.LO bl>>s, al
p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LR, &s, &al)
gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)
gmove(&n1, &creg)
- gcmp(arm.ACMP, &s, &creg)
+ gins(arm.ACMP, &s, &creg)
// MOVW.LO creg>>1, creg
p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)
gc.Patch(p3, gc.Pc)
gc.Patch(p4, gc.Pc)
gc.Patch(p5, gc.Pc)
- regfree(&s)
- regfree(&creg)
+ gc.Regfree(&s)
+ gc.Regfree(&creg)
orsh_break:
- regfree(&bl)
- regfree(&bh)
+ gc.Regfree(&bl)
+ gc.Regfree(&bh)
// TODO(kaib): literal optimizations
// make constant the right side (it usually is anyway).
gc.OAND,
gc.OOR:
var n1 gc.Node
- regalloc(&n1, lo1.Type, nil)
+ gc.Regalloc(&n1, lo1.Type, nil)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi1, &ah)
gins(optoas(int(n.Op), lo1.Type), &n1, &al)
gins(arm.AMOVW, &hi2, &n1)
gins(optoas(int(n.Op), lo1.Type), &n1, &ah)
- regfree(&n1)
+ gc.Regfree(&n1)
}
if gc.Is64(r.Type) {
splitclean()
//out:
- regfree(&al)
+ gc.Regfree(&al)
- regfree(&ah)
+ gc.Regfree(&ah)
}
/*
// if they differ, we're done.
t := hi1.Type
- regalloc(&r1, gc.Types[gc.TINT32], nil)
- regalloc(&r2, gc.Types[gc.TINT32], nil)
+ gc.Regalloc(&r1, gc.Types[gc.TINT32], nil)
+ gc.Regalloc(&r2, gc.Types[gc.TINT32], nil)
gins(arm.AMOVW, &hi1, &r1)
gins(arm.AMOVW, &hi2, &r2)
- gcmp(arm.ACMP, &r1, &r2)
- regfree(&r1)
- regfree(&r2)
+ gins(arm.ACMP, &r1, &r2)
+ gc.Regfree(&r1)
+ gc.Regfree(&r2)
var br *obj.Prog
switch op {
// compare least significant word
t = lo1.Type
- regalloc(&r1, gc.Types[gc.TINT32], nil)
- regalloc(&r2, gc.Types[gc.TINT32], nil)
+ gc.Regalloc(&r1, gc.Types[gc.TINT32], nil)
+ gc.Regalloc(&r2, gc.Types[gc.TINT32], nil)
gins(arm.AMOVW, &lo1, &r1)
gins(arm.AMOVW, &lo2, &r2)
- gcmp(arm.ACMP, &r1, &r2)
- regfree(&r1)
- regfree(&r2)
+ gins(arm.ACMP, &r1, &r2)
+ gc.Regfree(&r1)
+ gc.Regfree(&r2)
// jump again
gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)
gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = arm.REGSP
gc.Thearch.REGCTXT = arm.REGCTXT
+ gc.Thearch.REGCALLX = arm.REG_R1
+ gc.Thearch.REGCALLX2 = arm.REG_R2
+ gc.Thearch.REGRETURN = arm.REG_R0
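+ // Allocatable register ranges, used by the portable register
+ // allocator (gc.Regalloc) in place of the old local regalloc.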
+ gc.Thearch.REGMIN = arm.REG_R0
+ gc.Thearch.REGMAX = arm.REGEXT
+ gc.Thearch.FREGMIN = arm.REG_F0
+ gc.Thearch.FREGMAX = arm.FREGEXT
gc.Thearch.MAXWIDTH = MAXWIDTH
- gc.Thearch.Anyregalloc = anyregalloc
+ gc.Thearch.ReservedRegs = resvd
+
gc.Thearch.Betypeinit = betypeinit
- gc.Thearch.Bgen = bgen
- gc.Thearch.Cgen = cgen
- gc.Thearch.Cgen_call = cgen_call
- gc.Thearch.Cgen_callinter = cgen_callinter
- gc.Thearch.Cgen_ret = cgen_ret
+ gc.Thearch.Cgen64 = cgen64
+ gc.Thearch.Cgen_hmul = cgen_hmul
+ gc.Thearch.Cgen_shift = cgen_shift
gc.Thearch.Clearfat = clearfat
+ gc.Thearch.Cmp64 = cmp64
gc.Thearch.Defframe = defframe
gc.Thearch.Excise = excise
gc.Thearch.Expandchecks = expandchecks
- gc.Thearch.Gclean = gclean
- gc.Thearch.Ginit = ginit
gc.Thearch.Gins = gins
- gc.Thearch.Ginscall = ginscall
+ gc.Thearch.Ginscon = ginscon
+ gc.Thearch.Ginsnop = ginsnop
gc.Thearch.Gmove = gmove
- gc.Thearch.Igen = igen
+ gc.Thearch.Cgenindex = cgenindex
gc.Thearch.Linkarchinit = linkarchinit
gc.Thearch.Peep = peep
gc.Thearch.Proginfo = proginfo
- gc.Thearch.Regalloc = regalloc
- gc.Thearch.Regfree = regfree
gc.Thearch.Regtyp = regtyp
gc.Thearch.Sameaddr = sameaddr
gc.Thearch.Smallindir = smallindir
gc.Thearch.Stackaddr = stackaddr
+ gc.Thearch.Stackcopy = stackcopy
+ gc.Thearch.Sudoaddable = sudoaddable
+ gc.Thearch.Sudoclean = sudoclean
gc.Thearch.Excludedregs = excludedregs
gc.Thearch.RtoB = RtoB
gc.Thearch.FtoB = RtoB
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import "cmd/internal/obj/arm"
-
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-const (
- REGALLOC_R0 = arm.REG_R0
- REGALLOC_RMAX = arm.REGEXT
- REGALLOC_F0 = arm.REG_F0
- REGALLOC_FMAX = arm.FREGEXT
-)
-
-var reg [REGALLOC_FMAX + 1]uint8
-
-/*
- * cgen
- */
-
-/*
- * list.c
- */
-
-/*
- * reg.c
- */
return q
}
-/*
- * generate:
- * call f
- * proc=-1 normal call but no return
- * proc=0 normal call
- * proc=1 goroutine run in new proc
- * proc=2 defer call save away stack
- * proc=3 normal call to C pointer (not Go func value)
-*/
-func ginscall(f *gc.Node, proc int) {
- if f.Type != nil {
- extra := int32(0)
- if proc == 1 || proc == 2 {
- extra = 2 * int32(gc.Widthptr)
- }
- gc.Setmaxarg(f.Type, extra)
- }
-
- switch proc {
- default:
- gc.Fatal("ginscall: bad proc %d", proc)
-
- case 0, // normal call
- -1: // normal call but no return
- if f.Op == gc.ONAME && f.Class == gc.PFUNC {
- if f == gc.Deferreturn {
- // Deferred calls will appear to be returning to
- // the BL deferreturn(SB) that we are about to emit.
- // However, the stack trace code will show the line
- // of the instruction before that return PC.
- // To avoid that instruction being an unrelated instruction,
- // insert a NOP so that we will have the right line number.
- // ARM NOP 0x00000000 is really AND.EQ R0, R0, R0.
- // Use the latter form because the NOP pseudo-instruction
- // would be removed by the linker.
- var r gc.Node
- gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
-
- p := gins(arm.AAND, &r, &r)
- p.Scond = arm.C_SCOND_EQ
- }
-
- p := gins(arm.ABL, nil, f)
- gc.Afunclit(&p.To, f)
- if proc == -1 || gc.Noreturn(p) {
- gins(obj.AUNDEF, nil, nil)
- }
- break
- }
-
- var r gc.Node
- gc.Nodreg(&r, gc.Types[gc.Tptr], arm.REG_R7)
- var r1 gc.Node
- gc.Nodreg(&r1, gc.Types[gc.Tptr], arm.REG_R1)
- gmove(f, &r)
- r.Op = gc.OINDREG
- gmove(&r, &r1)
- r.Op = gc.OREGISTER
- r1.Op = gc.OINDREG
- gins(arm.ABL, &r, &r1)
-
- case 3: // normal call of c function pointer
- gins(arm.ABL, nil, f)
-
- case 1, // call in new proc (go)
- 2: // deferred call (defer)
- var r gc.Node
- regalloc(&r, gc.Types[gc.Tptr], nil)
-
- var con gc.Node
- gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
- gins(arm.AMOVW, &con, &r)
- p := gins(arm.AMOVW, &r, nil)
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = arm.REGSP
- p.To.Offset = 4
-
- gins(arm.AMOVW, f, &r)
- p = gins(arm.AMOVW, &r, nil)
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = arm.REGSP
- p.To.Offset = 8
-
- regfree(&r)
-
- if proc == 1 {
- ginscall(gc.Newproc, 0)
- } else {
- ginscall(gc.Deferproc, 0)
- }
-
- if proc == 2 {
- gc.Nodconst(&con, gc.Types[gc.TINT32], 0)
- p := gins(arm.ACMP, &con, nil)
- p.Reg = arm.REG_R0
- p = gc.Gbranch(arm.ABEQ, nil, +1)
- cgen_ret(nil)
- gc.Patch(p, gc.Pc)
- }
- }
-}
-
-/*
- * n is call to interface method.
- * generate res = n.
- */
-func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
- i := n.Left
- if i.Op != gc.ODOTINTER {
- gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
- }
-
- f := i.Right // field
- if f.Op != gc.ONAME {
- gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
- }
-
- i = i.Left // interface
-
- // Release res register during genlist and cgen,
- // which might have their own function calls.
- r := -1
-
- if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
- r = int(res.Val.U.Reg)
- reg[r]--
- }
-
- if i.Addable == 0 {
- var tmpi gc.Node
- gc.Tempname(&tmpi, i.Type)
- cgen(i, &tmpi)
- i = &tmpi
- }
-
- gc.Genlist(n.List) // args
- if r >= 0 {
- reg[r]++
- }
-
- var nodr gc.Node
- regalloc(&nodr, gc.Types[gc.Tptr], res)
- var nodo gc.Node
- regalloc(&nodo, gc.Types[gc.Tptr], &nodr)
- nodo.Op = gc.OINDREG
-
- agen(i, &nodr) // REG = &inter
-
- var nodsp gc.Node
- gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], arm.REGSP)
-
- nodsp.Xoffset = int64(gc.Widthptr)
- if proc != 0 {
- nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
- }
- nodo.Xoffset += int64(gc.Widthptr)
- cgen(&nodo, &nodsp) // {4 or 12}(SP) = 4(REG) -- i.data
-
- nodo.Xoffset -= int64(gc.Widthptr)
-
- cgen(&nodo, &nodr) // REG = 0(REG) -- i.tab
- gc.Cgen_checknil(&nodr) // in case offset is huge
-
- nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
-
- if proc == 0 {
- // plain call: use direct c function pointer - more efficient
- cgen(&nodo, &nodr) // REG = 20+offset(REG) -- i.tab->fun[f]
- nodr.Op = gc.OINDREG
- proc = 3
- } else {
- // go/defer. generate go func value.
- p := gins(arm.AMOVW, &nodo, &nodr)
-
- p.From.Type = obj.TYPE_ADDR // REG = &(20+offset(REG)) -- i.tab->fun[f]
- }
-
- nodr.Type = n.Left.Type
- ginscall(&nodr, proc)
-
- regfree(&nodr)
- regfree(&nodo)
-}
-
-/*
- * generate function call;
- * proc=0 normal call
- * proc=1 goroutine run in new proc
- * proc=2 defer call save away stack
- */
-func cgen_call(n *gc.Node, proc int) {
- if n == nil {
- return
- }
-
- var afun gc.Node
- if n.Left.Ullman >= gc.UINF {
- // if name involves a fn call
- // precompute the address of the fn
- gc.Tempname(&afun, gc.Types[gc.Tptr])
-
- cgen(n.Left, &afun)
- }
-
- gc.Genlist(n.List) // assign the args
- t := n.Left.Type
-
- // call tempname pointer
- if n.Left.Ullman >= gc.UINF {
- var nod gc.Node
- regalloc(&nod, gc.Types[gc.Tptr], nil)
- gc.Cgen_as(&nod, &afun)
- nod.Type = t
- ginscall(&nod, proc)
- regfree(&nod)
- return
- }
-
- // call pointer
- if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
- var nod gc.Node
- regalloc(&nod, gc.Types[gc.Tptr], nil)
- gc.Cgen_as(&nod, n.Left)
- nod.Type = t
- ginscall(&nod, proc)
- regfree(&nod)
- return
- }
-
- // call direct
- n.Left.Method = 1
-
- ginscall(n.Left, proc)
-}
-
-/*
- * call to n has already been generated.
- * generate:
- * res = return value from call.
- */
-func cgen_callret(n *gc.Node, res *gc.Node) {
- t := n.Left.Type
- if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
- t = t.Type
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(t))
- if fp == nil {
- gc.Fatal("cgen_callret: nil")
- }
-
- var nod gc.Node
- nod.Op = gc.OINDREG
- nod.Val.U.Reg = arm.REGSP
- nod.Addable = 1
-
- nod.Xoffset = fp.Width + 4 // +4: saved lr at 0(SP)
- nod.Type = fp.Type
- gc.Cgen_as(res, &nod)
-}
-
-/*
- * call to n has already been generated.
- * generate:
- * res = &return value from call.
- */
-func cgen_aret(n *gc.Node, res *gc.Node) {
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- t = t.Type
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(t))
- if fp == nil {
- gc.Fatal("cgen_aret: nil")
- }
-
- var nod1 gc.Node
- nod1.Op = gc.OINDREG
- nod1.Val.U.Reg = arm.REGSP
- nod1.Addable = 1
-
- nod1.Xoffset = fp.Width + 4 // +4: saved lr at 0(SP)
- nod1.Type = fp.Type
-
- if res.Op != gc.OREGISTER {
- var nod2 gc.Node
- regalloc(&nod2, gc.Types[gc.Tptr], res)
- agen(&nod1, &nod2)
- gins(arm.AMOVW, &nod2, res)
- regfree(&nod2)
- } else {
- agen(&nod1, res)
- }
-}
-
-/*
- * generate return.
- * n->left is assignments to return values.
- */
-func cgen_ret(n *gc.Node) {
- if n != nil {
- gc.Genlist(n.List) // copy out args
- }
- if gc.Hasdefer != 0 {
- ginscall(gc.Deferreturn, 0)
- }
- gc.Genlist(gc.Curfn.Exit)
- p := gins(obj.ARET, nil, nil)
- if n != nil && n.Op == gc.ORETJMP {
- p.To.Name = obj.NAME_EXTERN
- p.To.Type = obj.TYPE_ADDR
- p.To.Sym = gc.Linksym(n.Left.Sym)
- }
-}
-
/*
* generate high multiply
* res = (nl * nr) >> wordsize
t := nl.Type
w := int(t.Width * 8)
var n1 gc.Node
- regalloc(&n1, t, res)
- cgen(nl, &n1)
+ gc.Regalloc(&n1, t, res)
+ gc.Cgen(nl, &n1)
var n2 gc.Node
- regalloc(&n2, t, nil)
- cgen(nr, &n2)
+ gc.Regalloc(&n2, t, nil)
+ gc.Cgen(nr, &n2)
switch gc.Simtype[t.Etype] {
case gc.TINT8,
gc.TINT16:
gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
}
- cgen(&n1, res)
- regfree(&n1)
- regfree(&n2)
+ gc.Cgen(&n1, res)
+ gc.Regfree(&n1)
+ gc.Regfree(&n2)
}
/*
if op == gc.OLROT {
v := int(gc.Mpgetfix(nr.Val.U.Xval))
var n1 gc.Node
- regalloc(&n1, nl.Type, res)
+ gc.Regalloc(&n1, nl.Type, res)
if w == 32 {
- cgen(nl, &n1)
+ gc.Cgen(nl, &n1)
gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
} else {
var n2 gc.Node
- regalloc(&n2, nl.Type, nil)
- cgen(nl, &n2)
+ gc.Regalloc(&n2, nl.Type, nil)
+ gc.Cgen(nl, &n2)
gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
- regfree(&n2)
+ gc.Regfree(&n2)
// Ensure sign/zero-extended result.
gins(optoas(gc.OAS, nl.Type), &n1, &n1)
}
gmove(&n1, res)
- regfree(&n1)
+ gc.Regfree(&n1)
return
}
if nr.Op == gc.OLITERAL {
var n1 gc.Node
- regalloc(&n1, nl.Type, res)
- cgen(nl, &n1)
+ gc.Regalloc(&n1, nl.Type, res)
+ gc.Cgen(nl, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc == 0 {
} else // nothing to do
gins(optoas(gc.OAS, nl.Type), &n1, &n1)
}
gmove(&n1, res)
- regfree(&n1)
+ gc.Regfree(&n1)
return
}
var nt gc.Node
gc.Tempname(&nt, nr.Type)
if nl.Ullman >= nr.Ullman {
- regalloc(&n2, nl.Type, res)
- cgen(nl, &n2)
- cgen(nr, &nt)
+ gc.Regalloc(&n2, nl.Type, res)
+ gc.Cgen(nl, &n2)
+ gc.Cgen(nr, &nt)
n1 = nt
} else {
- cgen(nr, &nt)
- regalloc(&n2, nl.Type, res)
- cgen(nl, &n2)
+ gc.Cgen(nr, &nt)
+ gc.Regalloc(&n2, nl.Type, res)
+ gc.Cgen(nl, &n2)
}
var hi gc.Node
var lo gc.Node
split64(&nt, &lo, &hi)
- regalloc(&n1, gc.Types[gc.TUINT32], nil)
- regalloc(&n3, gc.Types[gc.TUINT32], nil)
+ gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
+ gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
gmove(&lo, &n1)
gmove(&hi, &n3)
splitclean()
p1 := gins(arm.AMOVW, &t, &n1)
p1.Scond = arm.C_SCOND_NE
tr = gc.Types[gc.TUINT32]
- regfree(&n3)
+ gc.Regfree(&n3)
} else {
if nl.Ullman >= nr.Ullman {
- regalloc(&n2, nl.Type, res)
- cgen(nl, &n2)
- regalloc(&n1, nr.Type, nil)
- cgen(nr, &n1)
+ gc.Regalloc(&n2, nl.Type, res)
+ gc.Cgen(nl, &n2)
+ gc.Regalloc(&n1, nr.Type, nil)
+ gc.Cgen(nr, &n1)
} else {
- regalloc(&n1, nr.Type, nil)
- cgen(nr, &n1)
- regalloc(&n2, nl.Type, res)
- cgen(nl, &n2)
+ gc.Regalloc(&n1, nr.Type, nil)
+ gc.Cgen(nr, &n1)
+ gc.Regalloc(&n2, nl.Type, res)
+ gc.Cgen(nl, &n2)
}
}
// test and fix up large shifts
// TODO: if(!bounded), don't emit some of this.
- regalloc(&n3, tr, nil)
+ gc.Regalloc(&n3, tr, nil)
gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
gmove(&t, &n3)
- gcmp(arm.ACMP, &n1, &n3)
+ gins(arm.ACMP, &n1, &n3)
if op == gc.ORSH {
var p1 *obj.Prog
var p2 *obj.Prog
p2.Scond = arm.C_SCOND_LO
}
- regfree(&n3)
+ gc.Regfree(&n3)
gc.Patch(p3, gc.Pc)
}
gmove(&n2, res)
- regfree(&n1)
- regfree(&n2)
+ gc.Regfree(&n1)
+ gc.Regfree(&n2)
}
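+// clearfat zeroes the fat (multiword) value nl in place, using an
+// explicit loop, duffzero, or individual stores depending on size.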
func clearfat(nl *gc.Node) {
var r0 gc.Node
r0.Op = gc.OREGISTER
- r0.Val.U.Reg = REGALLOC_R0
+ r0.Val.U.Reg = arm.REG_R0
var r1 gc.Node
r1.Op = gc.OREGISTER
- r1.Val.U.Reg = REGALLOC_R0 + 1
+ r1.Val.U.Reg = arm.REG_R1
var dst gc.Node
- regalloc(&dst, gc.Types[gc.Tptr], &r1)
- agen(nl, &dst)
+ gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
+ gc.Agen(nl, &dst)
var nc gc.Node
gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
var nz gc.Node
- regalloc(&nz, gc.Types[gc.TUINT32], &r0)
- cgen(&nc, &nz)
+ gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
+ gc.Cgen(&nc, &nz)
if q > 128 {
var end gc.Node
- regalloc(&end, gc.Types[gc.Tptr], nil)
+ gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
p := gins(arm.AMOVW, &dst, &end)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(q) * 4
raddr(&end, p)
gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
- regfree(&end)
+ gc.Regfree(&end)
} else if q >= 4 && !gc.Nacl {
f := gc.Sysfunc("duffzero")
p := gins(obj.ADUFFZERO, nil, f)
c--
}
- regfree(&dst)
- regfree(&nz)
+ gc.Regfree(&dst)
+ gc.Regfree(&nz)
}
// Called after regopt and peep have run.
p.Reg = int16(reg)
}
}
+
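+// ginsnop emits AND.EQ R0, R0, R0 instead of the NOP pseudo-instruction,
+// which the linker would remove.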
+func ginsnop() {
+ var r gc.Node
+ gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
+ p := gins(arm.AAND, &r, &r)
+ p.Scond = arm.C_SCOND_EQ
+}
+
+/*
+ * generate
+ * as $c, n
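+ * The constant is materialized in a scratch register first, since ARM
+ * instructions cannot encode an arbitrary 32-bit immediate.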
+ */
+func ginscon(as int, c int64, n *gc.Node) {
+ var n1 gc.Node
+ gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
+ var n2 gc.Node
+ gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
+ gmove(&n1, &n2)
+ gins(as, &n2, n)
+ gc.Regfree(&n2)
+}
+
+// addr += index*width if possible.
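+// Only widths that a single shifted ADD can scale (2, 4, and 8, via
+// LSL #1, #2, #3) are handled; for any other width it reports false.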
+func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
+ switch width {
+ case 2:
+ gshift(arm.AADD, index, arm.SHIFT_LL, 1, addr)
+ return true
+ case 4:
+ gshift(arm.AADD, index, arm.SHIFT_LL, 2, addr)
+ return true
+ case 8:
+ gshift(arm.AADD, index, arm.SHIFT_LL, 3, addr)
+ return true
+ }
+ return false
+}
var unmappedzero int = 4096
var resvd = []int{
- 9, // reserved for m
- 10, // reserved for g
- arm.REGSP, // reserved for SP
-}
-
-func ginit() {
- for i := 0; i < len(reg); i++ {
- reg[i] = 0
- }
- for i := 0; i < len(resvd); i++ {
- reg[resvd[i]]++
- }
-}
-
-func gclean() {
- for i := 0; i < len(resvd); i++ {
- reg[resvd[i]]--
- }
-
- for i := 0; i < len(reg); i++ {
- if reg[i] != 0 {
- gc.Yyerror("reg %v left allocated\n", obj.Rconv(i))
- }
- }
-}
-
-func anyregalloc() bool {
- var j int
-
- for i := 0; i < len(reg); i++ {
- if reg[i] == 0 {
- goto ok
- }
- for j = 0; j < len(resvd); j++ {
- if resvd[j] == i {
- goto ok
- }
- }
- return true
- ok:
- }
-
- return false
-}
-
-var regpc [REGALLOC_FMAX + 1]uint32
-
-/*
- * allocate register of type t, leave in n.
- * if o != N, o is desired fixed register.
- * caller must regfree(n).
- */
-func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
- if false && gc.Debug['r'] != 0 {
- fixfree := 0
- for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
- if reg[i] == 0 {
- fixfree++
- }
- }
- floatfree := 0
- for i := REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
- if reg[i] == 0 {
- floatfree++
- }
- }
- fmt.Printf("regalloc fix %d float %d\n", fixfree, floatfree)
- }
-
- if t == nil {
- gc.Fatal("regalloc: t nil")
- }
- et := int(gc.Simtype[t.Etype])
- if gc.Is64(t) {
- gc.Fatal("regalloc: 64 bit type %v")
- }
-
- var i int
- switch et {
- case gc.TINT8,
- gc.TUINT8,
- gc.TINT16,
- gc.TUINT16,
- gc.TINT32,
- gc.TUINT32,
- gc.TPTR32,
- gc.TBOOL:
- if o != nil && o.Op == gc.OREGISTER {
- i = int(o.Val.U.Reg)
- if i >= REGALLOC_R0 && i <= REGALLOC_RMAX {
- goto out
- }
- }
-
- for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
- if reg[i] == 0 {
- regpc[i] = uint32(obj.Getcallerpc(&n))
- goto out
- }
- }
-
- fmt.Printf("registers allocated at\n")
- for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
- fmt.Printf("%d %p\n", i, regpc[i])
- }
- gc.Fatal("out of fixed registers")
- goto err
-
- case gc.TFLOAT32,
- gc.TFLOAT64:
- if o != nil && o.Op == gc.OREGISTER {
- i = int(o.Val.U.Reg)
- if i >= REGALLOC_F0 && i <= REGALLOC_FMAX {
- goto out
- }
- }
-
- for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
- if reg[i] == 0 {
- goto out
- }
- }
- gc.Fatal("out of floating point registers")
- goto err
-
- case gc.TCOMPLEX64,
- gc.TCOMPLEX128:
- gc.Tempname(n, t)
- return
- }
-
- gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0))
-
-err:
- gc.Nodreg(n, t, arm.REG_R0)
- return
-
-out:
- reg[i]++
- gc.Nodreg(n, t, i)
-}
-
-func regfree(n *gc.Node) {
- if false && gc.Debug['r'] != 0 {
- fixfree := 0
- for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
- if reg[i] == 0 {
- fixfree++
- }
- }
- floatfree := 0
- for i := REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
- if reg[i] == 0 {
- floatfree++
- }
- }
- fmt.Printf("regalloc fix %d float %d\n", fixfree, floatfree)
- }
-
- if n.Op == gc.ONAME {
- return
- }
- if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
- gc.Fatal("regfree: not a register")
- }
- i := int(n.Val.U.Reg)
- if i == arm.REGSP {
- return
- }
- if i < 0 || i >= len(reg) || i >= len(regpc) {
- gc.Fatal("regfree: reg out of range")
- }
- if reg[i] <= 0 {
- gc.Fatal("regfree: reg %v not allocated", obj.Rconv(i))
- }
- reg[i]--
- if reg[i] == 0 {
- regpc[i] = 0
- }
+ arm.REG_R9, // formerly reserved for m; might be okay to reuse now; not sure about NaCl
+ arm.REG_R10, // reserved for g
}
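// NOTE: with ginit, gclean, and anyregalloc deleted above, this list is no
// longer pinned by hand; presumably it is handed to the portable allocator
// the way the x86 main later in this diff does with
// gc.Thearch.ReservedRegs = resvd.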
/*
default:
var n1 gc.Node
if !dotaddable(n, &n1) {
- igen(n, &n1, nil)
+ gc.Igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
case gc.ONAME:
if n.Class == gc.PPARAMREF {
var n1 gc.Node
- cgen(n.Heapaddr, &n1)
+ gc.Cgen(n.Heapaddr, &n1)
sclean[nsclean-1] = n1
n = &n1
}
}
nsclean--
if sclean[nsclean].Op != gc.OEMPTY {
- regfree(&sclean[nsclean])
+ gc.Regfree(&sclean[nsclean])
}
}
var con gc.Node
gc.Convconst(&con, gc.Types[gc.TINT32], &f.Val)
var r1 gc.Node
- regalloc(&r1, con.Type, t)
+ gc.Regalloc(&r1, con.Type, t)
gins(arm.AMOVW, &con, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
case gc.TUINT16,
var con gc.Node
gc.Convconst(&con, gc.Types[gc.TUINT32], &f.Val)
var r1 gc.Node
- regalloc(&r1, con.Type, t)
+ gc.Regalloc(&r1, con.Type, t)
gins(arm.AMOVW, &con, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
}
split64(f, &flo, &fhi)
var r1 gc.Node
- regalloc(&r1, t.Type, nil)
+ gc.Regalloc(&r1, t.Type, nil)
gins(arm.AMOVW, &flo, &r1)
gins(arm.AMOVW, &r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
splitclean()
return
var thi gc.Node
split64(t, &tlo, &thi)
var r1 gc.Node
- regalloc(&r1, flo.Type, nil)
+ gc.Regalloc(&r1, flo.Type, nil)
var r2 gc.Node
- regalloc(&r2, fhi.Type, nil)
+ gc.Regalloc(&r2, fhi.Type, nil)
gins(arm.AMOVW, &flo, &r1)
gins(arm.AMOVW, &fhi, &r2)
gins(arm.AMOVW, &r1, &tlo)
gins(arm.AMOVW, &r2, &thi)
- regfree(&r1)
- regfree(&r2)
+ gc.Regfree(&r1)
+ gc.Regfree(&r2)
splitclean()
splitclean()
return
split64(t, &tlo, &thi)
var r1 gc.Node
- regalloc(&r1, tlo.Type, nil)
+ gc.Regalloc(&r1, tlo.Type, nil)
var r2 gc.Node
- regalloc(&r2, thi.Type, nil)
+ gc.Regalloc(&r2, thi.Type, nil)
gmove(f, &r1)
p1 := gins(arm.AMOVW, &r1, &r2)
p1.From.Type = obj.TYPE_SHIFT
gins(arm.AMOVW, &r1, &tlo)
gins(arm.AMOVW, &r2, &thi)
- regfree(&r1)
- regfree(&r2)
+ gc.Regfree(&r1)
+ gc.Regfree(&r2)
splitclean()
return
gmove(f, &tlo)
var r1 gc.Node
- regalloc(&r1, thi.Type, nil)
+ gc.Regalloc(&r1, thi.Type, nil)
gins(arm.AMOVW, ncon(0), &r1)
gins(arm.AMOVW, &r1, &thi)
- regfree(&r1)
+ gc.Regfree(&r1)
splitclean()
return
}
var r1 gc.Node
- regalloc(&r1, gc.Types[ft], f)
+ gc.Regalloc(&r1, gc.Types[ft], f)
var r2 gc.Node
- regalloc(&r2, gc.Types[tt], t)
+ gc.Regalloc(&r2, gc.Types[tt], t)
gins(fa, f, &r1) // load to fpu
p1 := gins(a, &r1, &r1) // convert to w
switch tt {
gins(arm.AMOVW, &r1, &r2) // copy to cpu
gins(ta, &r2, t) // store
- regfree(&r1)
- regfree(&r2)
+ gc.Regfree(&r1)
+ gc.Regfree(&r2)
return
/*
}
var r1 gc.Node
- regalloc(&r1, gc.Types[ft], f)
+ gc.Regalloc(&r1, gc.Types[ft], f)
var r2 gc.Node
- regalloc(&r2, gc.Types[tt], t)
+ gc.Regalloc(&r2, gc.Types[tt], t)
gins(fa, f, &r1) // load to cpu
gins(arm.AMOVW, &r1, &r2) // copy to fpu
p1 := gins(a, &r2, &r2) // convert
}
gins(ta, &r2, t) // store
- regfree(&r1)
- regfree(&r2)
+ gc.Regfree(&r1)
+ gc.Regfree(&r2)
return
case gc.TUINT64<<16 | gc.TFLOAT32,
case gc.TFLOAT32<<16 | gc.TFLOAT64:
var r1 gc.Node
- regalloc(&r1, gc.Types[gc.TFLOAT64], t)
+ gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
gins(arm.AMOVF, f, &r1)
gins(arm.AMOVFD, &r1, &r1)
gins(arm.AMOVD, &r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
case gc.TFLOAT64<<16 | gc.TFLOAT32:
var r1 gc.Node
- regalloc(&r1, gc.Types[gc.TFLOAT64], t)
+ gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
gins(arm.AMOVD, f, &r1)
gins(arm.AMOVDF, &r1, &r1)
gins(arm.AMOVF, &r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
}
// requires register destination
rdst:
{
- regalloc(&r1, t.Type, t)
+ gc.Regalloc(&r1, t.Type, t)
gins(a, f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
}
// requires register intermediate
hard:
- regalloc(&r1, cvt, t)
+ gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
// truncate 64 bit integer
var flo gc.Node
split64(f, &flo, &fhi)
- regalloc(&r1, t.Type, nil)
+ gc.Regalloc(&r1, t.Type, nil)
gins(a, &flo, &r1)
gins(a, &r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
splitclean()
return
}
gc.Fatal("gins OINDEX not implemented")
}
- // regalloc(&nod, ®node, Z);
+ // gc.Regalloc(&nod, ®node, Z);
// v = constnode.vconst;
- // cgen(f->right, &nod);
+ // gc.Cgen(f->right, &nod);
// constnode.vconst = v;
// idx.reg = nod.reg;
- // regfree(&nod);
+ // gc.Regfree(&nod);
if t != nil && t.Op == gc.OINDEX {
gc.Fatal("gins OINDEX not implemented")
}
- // regalloc(&nod, ®node, Z);
+ // gc.Regalloc(&nod, ®node, Z);
// v = constnode.vconst;
- // cgen(t->right, &nod);
+ // gc.Cgen(t->right, &nod);
// constnode.vconst = v;
// idx.reg = nod.reg;
- // regfree(&nod);
+ // gc.Regfree(&nod);
p := gc.Prog(as)
gc.Naddr(&p.From, f)
gc.Naddr(&p.To, t)
+ switch as {
+ case arm.ABL:
+ if p.To.Type == obj.TYPE_REG {
+ p.To.Type = obj.TYPE_MEM
+ }
+
+ case arm.ACMP, arm.ACMPF, arm.ACMPD:
+ if t != nil {
+ if f.Op != gc.OREGISTER {
+ /* generate a comparison
+ TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
+ */
+ gc.Fatal("bad operands to gcmp")
+ }
+ p.From = p.To
+ p.To = obj.Addr{}
+ raddr(f, p)
+ }
+
+ case arm.AMULU:
+ if f != nil && f.Op != gc.OREGISTER {
+ gc.Fatal("bad operands to mul")
+ }
+
+ case arm.AMOVW:
+ if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR || p.From.Type == obj.TYPE_CONST) && (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) {
+ gc.Fatal("gins double memory")
+ }
+
+ case arm.AADD:
+ if p.To.Type == obj.TYPE_MEM {
+ gc.Fatal("gins arith to mem")
+ }
+
+ case arm.ARSB:
+ if p.From.Type == obj.TYPE_NONE {
+ gc.Fatal("rsb with no from")
+ }
+ }
+
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
}
}
}
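// NOTE: the ACMP/ACMPF/ACMPD arm of the switch above absorbs the deleted
// gcmp helper: gins(arm.ACMP, lhs, rhs) moves rhs into p.From, clears p.To,
// and attaches the register operand lhs via raddr, preserving the operand
// order of the old gcmp(as, lhs, rhs).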
-/* generate a comparison
-TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
-*/
-func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
- if lhs.Op != gc.OREGISTER {
- gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
- }
-
- p := gins(as, rhs, nil)
- raddr(lhs, p)
- return p
-}
-
/* generate a constant shift
* arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
*/
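// NOTE: concretely, in the arm shift-operand encoding an immediate amount of
// 0 denotes a shift by 32 (for LSR/ASR), so a shift by 0 has no encoding of
// its own and callers are expected to omit the instruction instead.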
case gc.OCMP<<16 | gc.TFLOAT64:
a = arm.ACMPD
+ case gc.OPS<<16 | gc.TFLOAT32,
+ gc.OPS<<16 | gc.TFLOAT64:
+ a = arm.ABVS
+
case gc.OAS<<16 | gc.TBOOL:
a = arm.AMOVB
func sudoclean() {
if clean[cleani-1].Op != gc.OEMPTY {
- regfree(&clean[cleani-1])
+ gc.Regfree(&clean[cleani-1])
}
if clean[cleani-2].Op != gc.OEMPTY {
- regfree(&clean[cleani-2])
+ gc.Regfree(&clean[cleani-2])
}
cleani -= 2
}
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
if n.Type == nil {
return false
}
return true
}
- regalloc(reg, gc.Types[gc.Tptr], nil)
+ gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
n1 := *reg
n1.Op = gc.OINDREG
if oary[0] >= 0 {
- agen(nn, reg)
+ gc.Agen(nn, reg)
n1.Xoffset = oary[0]
} else {
- cgen(nn, reg)
+ gc.Cgen(nn, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[0] + 1)
}
// R1 is ptr to memory, used and set, cannot be substituted.
case obj.ADUFFZERO:
if v.Type == obj.TYPE_REG {
- if v.Reg == REGALLOC_R0 {
+ if v.Reg == arm.REG_R0 {
return 1
}
- if v.Reg == REGALLOC_R0+1 {
+ if v.Reg == arm.REG_R0+1 {
return 2
}
}
// R1, R2 are ptr to src, dst, used and set, cannot be substituted.
case obj.ADUFFCOPY:
if v.Type == obj.TYPE_REG {
- if v.Reg == REGALLOC_R0 {
+ if v.Reg == arm.REG_R0 {
return 3
}
- if v.Reg == REGALLOC_R0+1 || v.Reg == REGALLOC_R0+2 {
+ if v.Reg == arm.REG_R0+1 || v.Reg == arm.REG_R0+2 {
return 2
}
}
"cmd/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/x86"
- "fmt"
)
-/*
- * reg.c
- */
-
-/*
- * peep.c
- */
-/*
- * generate:
- * res = n;
- * simplifies and calls gmove.
- */
-func cgen(n *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\ncgen-n", n)
- gc.Dump("cgen-res", res)
- }
-
- if n == nil || n.Type == nil {
- return
- }
-
- if res == nil || res.Type == nil {
- gc.Fatal("cgen: res nil")
- }
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- }
-
- switch n.Op {
- case gc.OSLICE,
- gc.OSLICEARR,
- gc.OSLICESTR,
- gc.OSLICE3,
- gc.OSLICE3ARR:
- if res.Op != gc.ONAME || res.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_slice(n, &n1)
- cgen(&n1, res)
- } else {
- gc.Cgen_slice(n, res)
- }
- return
-
- case gc.OEFACE:
- if res.Op != gc.ONAME || res.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_eface(n, &n1)
- cgen(&n1, res)
- } else {
- gc.Cgen_eface(n, res)
- }
- return
- }
-
- if n.Ullman >= gc.UINF {
- if n.Op == gc.OINDREG {
- gc.Fatal("cgen: this is going to misscompile")
- }
- if res.Ullman >= gc.UINF {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- cgen(n, &n1)
- cgen(&n1, res)
- return
- }
- }
-
- if gc.Isfat(n.Type) {
- if n.Type.Width < 0 {
- gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
- }
- sgen(n, res, n.Type.Width)
- return
- }
-
- if res.Addable == 0 {
- if n.Ullman > res.Ullman {
- var n1 gc.Node
- regalloc(&n1, n.Type, res)
- cgen(n, &n1)
- if n1.Ullman > res.Ullman {
- gc.Dump("n1", &n1)
- gc.Dump("res", res)
- gc.Fatal("loop in cgen")
- }
-
- cgen(&n1, res)
- regfree(&n1)
- return
- }
-
- var f int
- if res.Ullman >= gc.UINF {
- goto gen
- }
-
- if gc.Complexop(n, res) {
- gc.Complexgen(n, res)
- return
- }
-
- f = 1 // gen thru register
- switch n.Op {
- case gc.OLITERAL:
- if gc.Smallintconst(n) {
- f = 0
- }
-
- case gc.OREGISTER:
- f = 0
- }
-
- if !gc.Iscomplex[n.Type.Etype] {
- a := optoas(gc.OAS, res.Type)
- var addr obj.Addr
- if sudoaddable(a, res, &addr) {
- var p1 *obj.Prog
- if f != 0 {
- var n2 gc.Node
- regalloc(&n2, res.Type, nil)
- cgen(n, &n2)
- p1 = gins(a, &n2, nil)
- regfree(&n2)
- } else {
- p1 = gins(a, n, nil)
- }
- p1.To = addr
- if gc.Debug['g'] != 0 {
- fmt.Printf("%v [ignore previous line]\n", p1)
- }
- sudoclean()
- return
- }
- }
-
- gen:
- var n1 gc.Node
- igen(res, &n1, nil)
- cgen(n, &n1)
- regfree(&n1)
- return
- }
-
- // update addressability for string, slice
- // can't do in walk because n->left->addable
- // changes if n->left is an escaping local variable.
- switch n.Op {
- case gc.OSPTR,
- gc.OLEN:
- if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
- n.Addable = n.Left.Addable
- }
-
- case gc.OCAP:
- if gc.Isslice(n.Left.Type) {
- n.Addable = n.Left.Addable
- }
-
- case gc.OITAB:
- n.Addable = n.Left.Addable
- }
-
- if gc.Complexop(n, res) {
- gc.Complexgen(n, res)
- return
- }
-
- if n.Addable != 0 {
- gmove(n, res)
- return
- }
-
- nl := n.Left
- nr := n.Right
-
- if nl != nil && nl.Ullman >= gc.UINF {
- if nr != nil && nr.Ullman >= gc.UINF {
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
- cgen(nl, &n1)
- n2 := *n
- n2.Left = &n1
- cgen(&n2, res)
- return
- }
- }
-
- if !gc.Iscomplex[n.Type.Etype] {
- a := optoas(gc.OAS, n.Type)
- var addr obj.Addr
- if sudoaddable(a, n, &addr) {
- if res.Op == gc.OREGISTER {
- p1 := gins(a, nil, res)
- p1.From = addr
- } else {
- var n2 gc.Node
- regalloc(&n2, n.Type, nil)
- p1 := gins(a, nil, &n2)
- p1.From = addr
- gins(a, &n2, res)
- regfree(&n2)
- }
-
- sudoclean()
- return
- }
- }
-
- var a int
- switch n.Op {
- default:
- gc.Dump("cgen", n)
- gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
-
- // these call bgen to get a bool value
- case gc.OOROR,
- gc.OANDAND,
- gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OLE,
- gc.OGE,
- gc.OGT,
- gc.ONOT:
- p1 := gc.Gbranch(obj.AJMP, nil, 0)
-
- p2 := gc.Pc
- gmove(gc.Nodbool(true), res)
- p3 := gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, gc.Pc)
- bgen(n, true, 0, p2)
- gmove(gc.Nodbool(false), res)
- gc.Patch(p3, gc.Pc)
- return
-
- case gc.OPLUS:
- cgen(nl, res)
- return
-
- // unary
- case gc.OCOM:
- a := optoas(gc.OXOR, nl.Type)
-
- var n1 gc.Node
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
- var n2 gc.Node
- gc.Nodconst(&n2, nl.Type, -1)
- gins(a, &n2, &n1)
- gmove(&n1, res)
- regfree(&n1)
- return
-
- case gc.OMINUS:
- if gc.Isfloat[nl.Type.Etype] {
- nr = gc.Nodintconst(-1)
- gc.Convlit(&nr, n.Type)
- a = optoas(gc.OMUL, nl.Type)
- goto sbop
- }
-
- a := optoas(int(n.Op), nl.Type)
- // unary
- var n1 gc.Node
- regalloc(&n1, nl.Type, res)
-
- cgen(nl, &n1)
- gins(a, nil, &n1)
- gmove(&n1, res)
- regfree(&n1)
- return
-
- // symmetric binary
- case gc.OAND,
- gc.OOR,
- gc.OXOR,
- gc.OADD,
- gc.OMUL:
- a = optoas(int(n.Op), nl.Type)
-
- if a == x86.AIMULB {
- cgen_bmul(int(n.Op), nl, nr, res)
- break
- }
-
- goto sbop
-
- // asymmetric binary
- case gc.OSUB:
- a = optoas(int(n.Op), nl.Type)
-
- goto abop
-
- case gc.OHMUL:
- cgen_hmul(nl, nr, res)
-
- case gc.OCONV:
- if n.Type.Width > nl.Type.Width {
- // If loading from memory, do conversion during load,
- // so as to avoid use of 8-bit register in, say, int(*byteptr).
- switch nl.Op {
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OINDEX,
- gc.OIND,
- gc.ONAME:
- var n1 gc.Node
- igen(nl, &n1, res)
- var n2 gc.Node
- regalloc(&n2, n.Type, res)
- gmove(&n1, &n2)
- gmove(&n2, res)
- regfree(&n2)
- regfree(&n1)
- return
- }
- }
-
- var n1 gc.Node
- regalloc(&n1, nl.Type, res)
- var n2 gc.Node
- regalloc(&n2, n.Type, &n1)
- cgen(nl, &n1)
-
- // if we do the conversion n1 -> n2 here
- // reusing the register, then gmove won't
- // have to allocate its own register.
- gmove(&n1, &n2)
-
- gmove(&n2, res)
- regfree(&n2)
- regfree(&n1)
-
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OINDEX,
- gc.OIND,
- gc.ONAME: // PHEAP or PPARAMREF var
- var n1 gc.Node
- igen(n, &n1, res)
-
- gmove(&n1, res)
- regfree(&n1)
-
- // interface table is first word of interface value
- case gc.OITAB:
- var n1 gc.Node
- igen(nl, &n1, res)
-
- n1.Type = n.Type
- gmove(&n1, res)
- regfree(&n1)
-
- // pointer is the first word of string or slice.
- case gc.OSPTR:
- if gc.Isconst(nl, gc.CTSTR) {
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
- p1 := gins(x86.ALEAQ, nil, &n1)
- gc.Datastring(nl.Val.U.Sval, &p1.From)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- var n1 gc.Node
- igen(nl, &n1, res)
- n1.Type = n.Type
- gmove(&n1, res)
- regfree(&n1)
-
- case gc.OLEN:
- if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
- // map and chan have len in the first int-sized word.
- // a zero pointer means zero length
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
-
- cgen(nl, &n1)
-
- var n2 gc.Node
- gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
- gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
-
- n2 = n1
- n2.Op = gc.OINDREG
- n2.Type = gc.Types[gc.Simtype[gc.TINT]]
- gmove(&n2, &n1)
-
- gc.Patch(p1, gc.Pc)
-
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
- // both slice and string have len one pointer into the struct.
- // a zero pointer means zero length
- var n1 gc.Node
- igen(nl, &n1, res)
-
- n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
- n1.Xoffset += int64(gc.Array_nel)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
-
- case gc.OCAP:
- if gc.Istype(nl.Type, gc.TCHAN) {
- // chan has cap in the second int-sized word.
- // a zero pointer means zero length
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
-
- cgen(nl, &n1)
-
- var n2 gc.Node
- gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
- gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
-
- n2 = n1
- n2.Op = gc.OINDREG
- n2.Xoffset = int64(gc.Widthint)
- n2.Type = gc.Types[gc.Simtype[gc.TINT]]
- gmove(&n2, &n1)
-
- gc.Patch(p1, gc.Pc)
-
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- if gc.Isslice(nl.Type) {
- var n1 gc.Node
- igen(nl, &n1, res)
- n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
- n1.Xoffset += int64(gc.Array_cap)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
-
- case gc.OADDR:
- if n.Bounded { // let race detector avoid nil checks
- gc.Disable_checknil++
- }
- agen(nl, res)
- if n.Bounded {
- gc.Disable_checknil--
- }
-
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
- cgen_callret(n, res)
-
- case gc.OCALLINTER:
- cgen_callinter(n, res, 0)
- cgen_callret(n, res)
-
- case gc.OCALLFUNC:
- cgen_call(n, 0)
- cgen_callret(n, res)
-
- case gc.OMOD,
- gc.ODIV:
- if gc.Isfloat[n.Type.Etype] {
- a = optoas(int(n.Op), nl.Type)
- goto abop
- }
-
- if nl.Ullman >= nr.Ullman {
- var n1 gc.Node
- regalloc(&n1, nl.Type, res)
- cgen(nl, &n1)
- cgen_div(int(n.Op), &n1, nr, res)
- regfree(&n1)
- } else {
- var n2 gc.Node
- if !gc.Smallintconst(nr) {
- regalloc(&n2, nr.Type, res)
- cgen(nr, &n2)
- } else {
- n2 = *nr
- }
-
- cgen_div(int(n.Op), nl, &n2, res)
- if n2.Op != gc.OLITERAL {
- regfree(&n2)
- }
- }
-
- case gc.OLSH,
- gc.ORSH,
- gc.OLROT:
- cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
- }
-
- return
-
- /*
- * put simplest on right - we'll generate into left
- * and then adjust it using the computation of right.
- * constants and variables have the same ullman
- * count, so look for constants specially.
- *
- * an integer constant we can use as an immediate
- * is simpler than a variable - we can use the immediate
- * in the adjustment instruction directly - so it goes
- * on the right.
- *
- * other constants, like big integers or floating point
- * constants, require a mov into a register, so those
- * might as well go on the left, so we can reuse that
- * register for the computation.
- */
-sbop: // symmetric binary
- if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
- r := nl
- nl = nr
- nr = r
- }
-
-abop: // asymmetric binary
- var n1 gc.Node
- var n2 gc.Node
- if nl.Ullman >= nr.Ullman {
- regalloc(&n1, nl.Type, res)
- cgen(nl, &n1)
-
- /*
- * This generates smaller code - it avoids a MOV - but it's
- * easily 10% slower due to not being able to
- * optimize/manipulate the move.
- * To see, run: go test -bench . crypto/md5
- * with and without.
- *
- if(sudoaddable(a, nr, &addr)) {
- p1 = gins(a, N, &n1);
- p1->from = addr;
- gmove(&n1, res);
- sudoclean();
- regfree(&n1);
- goto ret;
- }
- *
- */
- if gc.Smallintconst(nr) {
- n2 = *nr
- } else {
- regalloc(&n2, nr.Type, nil)
- cgen(nr, &n2)
- }
- } else {
- if gc.Smallintconst(nr) {
- n2 = *nr
- } else {
- regalloc(&n2, nr.Type, res)
- cgen(nr, &n2)
- }
-
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
- }
-
- gins(a, &n2, &n1)
- gmove(&n1, res)
- regfree(&n1)
- if n2.Op != gc.OLITERAL {
- regfree(&n2)
- }
- return
-}
-
-/*
- * allocate a register (reusing res if possible) and generate
- * a = n
- * The caller must call regfree(a).
- */
-func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("cgenr-n", n)
- }
-
- if gc.Isfat(n.Type) {
- gc.Fatal("cgenr on fat node")
- }
-
- if n.Addable != 0 {
- regalloc(a, n.Type, res)
- gmove(n, a)
- return
- }
-
- switch n.Op {
- case gc.ONAME,
- gc.ODOT,
- gc.ODOTPTR,
- gc.OINDEX,
- gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- var n1 gc.Node
- igen(n, &n1, res)
- regalloc(a, gc.Types[gc.Tptr], &n1)
- gmove(&n1, a)
- regfree(&n1)
-
- default:
- regalloc(a, n.Type, res)
- cgen(n, a)
- }
-}
-
-/*
- * allocate a register (reusing res if possible) and generate
- * a = &n
- * The caller must call regfree(a).
- * The generated code checks that the result is not nil.
- */
-func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nagenr-n", n)
- }
-
- nl := n.Left
- nr := n.Right
-
- switch n.Op {
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- var n1 gc.Node
- igen(n, &n1, res)
- regalloc(a, gc.Types[gc.Tptr], &n1)
- agen(&n1, a)
- regfree(&n1)
-
- case gc.OIND:
- cgenr(n.Left, a, res)
- gc.Cgen_checknil(a)
-
- case gc.OINDEX:
- freelen := 0
- w := uint64(n.Type.Width)
-
- // Generate the non-addressable child first.
- var n3 gc.Node
- var nlen gc.Node
- var tmp gc.Node
- var n1 gc.Node
- if nr.Addable != 0 {
- goto irad
- }
- if nl.Addable != 0 {
- cgenr(nr, &n1, nil)
- if !gc.Isconst(nl, gc.CTSTR) {
- if gc.Isfixedarray(nl.Type) {
- agenr(nl, &n3, res)
- } else {
- igen(nl, &nlen, res)
- freelen = 1
- nlen.Type = gc.Types[gc.Tptr]
- nlen.Xoffset += int64(gc.Array_array)
- regalloc(&n3, gc.Types[gc.Tptr], res)
- gmove(&nlen, &n3)
- nlen.Type = gc.Types[gc.Simtype[gc.TUINT]]
- nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
- }
- }
-
- goto index
- }
-
- gc.Tempname(&tmp, nr.Type)
- cgen(nr, &tmp)
- nr = &tmp
-
- irad:
- if !gc.Isconst(nl, gc.CTSTR) {
- if gc.Isfixedarray(nl.Type) {
- agenr(nl, &n3, res)
- } else {
- if nl.Addable == 0 {
- // igen will need an addressable node.
- var tmp2 gc.Node
- gc.Tempname(&tmp2, nl.Type)
-
- cgen(nl, &tmp2)
- nl = &tmp2
- }
-
- igen(nl, &nlen, res)
- freelen = 1
- nlen.Type = gc.Types[gc.Tptr]
- nlen.Xoffset += int64(gc.Array_array)
- regalloc(&n3, gc.Types[gc.Tptr], res)
- gmove(&nlen, &n3)
- nlen.Type = gc.Types[gc.Simtype[gc.TUINT]]
- nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
- }
- }
-
- if !gc.Isconst(nr, gc.CTINT) {
- cgenr(nr, &n1, nil)
- }
-
- goto index
-
- // &a is in &n3 (allocated in res)
- // i is in &n1 (if not constant)
- // len(a) is in nlen (if needed)
- // w is width
-
- // constant index
- index:
- if gc.Isconst(nr, gc.CTINT) {
- if gc.Isconst(nl, gc.CTSTR) {
- gc.Fatal("constant string constant index") // front end should handle
- }
- v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
- if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- if gc.Debug['B'] == 0 && !n.Bounded {
- var n2 gc.Node
- gc.Nodconst(&n2, gc.Types[gc.Simtype[gc.TUINT]], int64(v))
- if gc.Smallintconst(nr) {
- gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &n2)
- } else {
- regalloc(&tmp, gc.Types[gc.Simtype[gc.TUINT]], nil)
- gmove(&n2, &tmp)
- gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &tmp)
- regfree(&tmp)
- }
-
- p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
- ginscall(gc.Panicindex, -1)
- gc.Patch(p1, gc.Pc)
- }
-
- regfree(&nlen)
- }
-
- if v*w != 0 {
- ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*w), &n3)
- }
- *a = n3
- break
- }
-
- // type of the index
- t := gc.Types[gc.TUINT64]
-
- if gc.Issigned[n1.Type.Etype] {
- t = gc.Types[gc.TINT64]
- }
-
- var n2 gc.Node
- regalloc(&n2, t, &n1) // i
- gmove(&n1, &n2)
- regfree(&n1)
-
- if gc.Debug['B'] == 0 && !n.Bounded {
- // check bounds
- t = gc.Types[gc.Simtype[gc.TUINT]]
-
- if gc.Is64(nr.Type) {
- t = gc.Types[gc.TUINT64]
- }
- if gc.Isconst(nl, gc.CTSTR) {
- gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval)))
- } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- if gc.Is64(nr.Type) {
- var n5 gc.Node
- regalloc(&n5, t, nil)
- gmove(&nlen, &n5)
- regfree(&nlen)
- nlen = n5
- }
- } else {
- gc.Nodconst(&nlen, t, nl.Type.Bound)
- if !gc.Smallintconst(&nlen) {
- var n5 gc.Node
- regalloc(&n5, t, nil)
- gmove(&nlen, &n5)
- nlen = n5
- freelen = 1
- }
- }
-
- gins(optoas(gc.OCMP, t), &n2, &nlen)
- p1 := gc.Gbranch(optoas(gc.OLT, t), nil, +1)
- ginscall(gc.Panicindex, -1)
- gc.Patch(p1, gc.Pc)
- }
-
- if gc.Isconst(nl, gc.CTSTR) {
- regalloc(&n3, gc.Types[gc.Tptr], res)
- p1 := gins(x86.ALEAQ, nil, &n3)
- gc.Datastring(nl.Val.U.Sval, &p1.From)
- gins(x86.AADDQ, &n2, &n3)
- goto indexdone
- }
-
- if w == 0 {
- } else // nothing to do
- if w == 1 || w == 2 || w == 4 || w == 8 {
- p1 := gins(x86.ALEAQ, &n2, &n3)
- p1.From.Type = obj.TYPE_MEM
- p1.From.Scale = int16(w)
- p1.From.Index = p1.From.Reg
- p1.From.Reg = p1.To.Reg
- } else {
- ginscon(optoas(gc.OMUL, t), int64(w), &n2)
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
- }
-
- indexdone:
- *a = n3
- regfree(&n2)
- if freelen != 0 {
- regfree(&nlen)
- }
-
- default:
- regalloc(a, gc.Types[gc.Tptr], res)
- agen(n, a)
- }
-}
-
-/*
- * generate:
- * res = &n;
- * The generated code checks that the result is not nil.
- */
-func agen(n *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nagen-res", res)
- gc.Dump("agen-r", n)
- }
-
- if n == nil || n.Type == nil {
- return
- }
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- }
-
- if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
- // Use of a nil interface or nil slice.
- // Create a temporary we can take the address of and read.
- // The generated code is just going to panic, so it need not
- // be terribly efficient. See issue 3670.
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
-
- gc.Gvardef(&n1)
- clearfat(&n1)
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], res)
- gins(x86.ALEAQ, &n1, &n2)
- gmove(&n2, res)
- regfree(&n2)
- return
- }
-
- if n.Addable != 0 {
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
- gins(x86.ALEAQ, n, &n1)
- gmove(&n1, res)
- regfree(&n1)
- return
- }
-
- nl := n.Left
-
- switch n.Op {
- default:
- gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
-
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
- cgen_aret(n, res)
-
- case gc.OCALLINTER:
- cgen_callinter(n, res, 0)
- cgen_aret(n, res)
-
- case gc.OCALLFUNC:
- cgen_call(n, 0)
- cgen_aret(n, res)
-
- case gc.OSLICE,
- gc.OSLICEARR,
- gc.OSLICESTR,
- gc.OSLICE3,
- gc.OSLICE3ARR:
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_slice(n, &n1)
- agen(&n1, res)
-
- case gc.OEFACE:
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_eface(n, &n1)
- agen(&n1, res)
-
- case gc.OINDEX:
- var n1 gc.Node
- agenr(n, &n1, res)
- gmove(&n1, res)
- regfree(&n1)
-
- // should only get here with names in this func.
- case gc.ONAME:
- if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
- gc.Dump("bad agen", n)
- gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
- }
-
- // should only get here for heap vars or paramref
- if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
- gc.Dump("bad agen", n)
- gc.Fatal("agen: bad ONAME class %#x", n.Class)
- }
-
- cgen(n.Heapaddr, res)
- if n.Xoffset != 0 {
- ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
- }
-
- case gc.OIND:
- cgen(nl, res)
- gc.Cgen_checknil(res)
-
- case gc.ODOT:
- agen(nl, res)
- if n.Xoffset != 0 {
- ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
- }
-
- case gc.ODOTPTR:
- cgen(nl, res)
- gc.Cgen_checknil(res)
- if n.Xoffset != 0 {
- ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
- }
- }
-}
-
-/*
- * generate:
- * newreg = &n;
- * res = newreg
- *
- * on exit, a has been changed to be *newreg.
- * caller must regfree(a).
- * The generated code checks that the result is not *nil.
- */
-func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nigen-n", n)
- }
-
- switch n.Op {
- case gc.ONAME:
- if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
- break
- }
- *a = *n
- return
-
- // Increase the refcount of the register so that igen's caller
- // has to call regfree.
- case gc.OINDREG:
- if n.Val.U.Reg != x86.REG_SP {
- reg[n.Val.U.Reg]++
- }
- *a = *n
- return
-
- case gc.ODOT:
- igen(n.Left, a, res)
- a.Xoffset += n.Xoffset
- a.Type = n.Type
- fixlargeoffset(a)
- return
-
- case gc.ODOTPTR:
- cgenr(n.Left, a, res)
- gc.Cgen_checknil(a)
- a.Op = gc.OINDREG
- a.Xoffset += n.Xoffset
- a.Type = n.Type
- fixlargeoffset(a)
- return
-
- case gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- switch n.Op {
- case gc.OCALLFUNC:
- cgen_call(n, 0)
-
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
-
- case gc.OCALLINTER:
- cgen_callinter(n, nil, 0)
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
- *a = gc.Node{}
- a.Op = gc.OINDREG
- a.Val.U.Reg = x86.REG_SP
- a.Addable = 1
- a.Xoffset = fp.Width
- a.Type = n.Type
- return
-
- // Index of fixed-size array by constant can
- // put the offset in the addressing.
- // Could do the same for slice except that we need
- // to use the real index for the bounds checking.
- case gc.OINDEX:
- if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] && gc.Isfixedarray(n.Left.Left.Type)) {
- if gc.Isconst(n.Right, gc.CTINT) {
- // Compute &a.
- if !gc.Isptr[n.Left.Type.Etype] {
- igen(n.Left, a, res)
- } else {
- var n1 gc.Node
- igen(n.Left, &n1, res)
- gc.Cgen_checknil(&n1)
- regalloc(a, gc.Types[gc.Tptr], res)
- gmove(&n1, a)
- regfree(&n1)
- a.Op = gc.OINDREG
- }
-
- // Compute &a[i] as &a + i*width.
- a.Type = n.Type
-
- a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
- fixlargeoffset(a)
- return
- }
- }
- }
-
- agenr(n, a, res)
- a.Op = gc.OINDREG
- a.Type = n.Type
-}
-
-/*
- * generate:
- * if(n == true) goto to;
- */
-func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nbgen", n)
- }
-
- if n == nil {
- n = gc.Nodbool(true)
- }
-
- if n.Ninit != nil {
- gc.Genlist(n.Ninit)
- }
-
- if n.Type == nil {
- gc.Convlit(&n, gc.Types[gc.TBOOL])
- if n.Type == nil {
- return
- }
- }
-
- et := int(n.Type.Etype)
- if et != gc.TBOOL {
- gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
- gc.Patch(gins(obj.AEND, nil, nil), to)
- return
- }
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- if n.Ninit != nil {
- gc.Genlist(n.Ninit)
- }
- }
-
- var nl *gc.Node
- var nr *gc.Node
- switch n.Op {
- default:
- goto def
-
- // need to ask if it is bool?
- case gc.OLITERAL:
- if !true_ == (n.Val.U.Bval == 0) {
- gc.Patch(gc.Gbranch(obj.AJMP, nil, likely), to)
- }
- return
-
- case gc.ONAME:
- if n.Addable == 0 {
- goto def
- }
- var n1 gc.Node
- gc.Nodconst(&n1, n.Type, 0)
- gins(optoas(gc.OCMP, n.Type), n, &n1)
- a := x86.AJNE
- if !true_ {
- a = x86.AJEQ
- }
- gc.Patch(gc.Gbranch(a, n.Type, likely), to)
- return
-
- case gc.OANDAND,
- gc.OOROR:
- if (n.Op == gc.OANDAND) == true_ {
- p1 := gc.Gbranch(obj.AJMP, nil, 0)
- p2 := gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, gc.Pc)
- bgen(n.Left, !true_, -likely, p2)
- bgen(n.Right, !true_, -likely, p2)
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, to)
- gc.Patch(p2, gc.Pc)
- } else {
- bgen(n.Left, true_, likely, to)
- bgen(n.Right, true_, likely, to)
- }
-
- return
-
- case gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OGT,
- gc.OLE,
- gc.OGE:
- nr = n.Right
- if nr == nil || nr.Type == nil {
- return
- }
- fallthrough
-
- case gc.ONOT: // unary
- nl = n.Left
-
- if nl == nil || nl.Type == nil {
- return
- }
- }
-
- switch n.Op {
- case gc.ONOT:
- bgen(nl, !true_, likely, to)
- return
-
- case gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OGT,
- gc.OLE,
- gc.OGE:
- a := int(n.Op)
- if !true_ {
- if gc.Isfloat[nr.Type.Etype] {
- // brcom is not valid on floats when NaN is involved.
- p1 := gc.Gbranch(obj.AJMP, nil, 0)
-
- p2 := gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, gc.Pc)
- ll := n.Ninit // avoid re-genning ninit
- n.Ninit = nil
- bgen(n, true, -likely, p2)
- n.Ninit = ll
- gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
- gc.Patch(p2, gc.Pc)
- return
- }
-
- a = gc.Brcom(a)
- true_ = !true_
- }
-
- // make simplest on right
- if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
- a = gc.Brrev(a)
- r := nl
- nl = nr
- nr = r
- }
-
- if gc.Isslice(nl.Type) {
- // front end should only leave cmp to literal nil
- if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
- gc.Yyerror("illegal slice comparison")
- break
- }
-
- a = optoas(a, gc.Types[gc.Tptr])
- var n1 gc.Node
- igen(nl, &n1, nil)
- n1.Xoffset += int64(gc.Array_array)
- n1.Type = gc.Types[gc.Tptr]
- var tmp gc.Node
- gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
- gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
- gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
- regfree(&n1)
- break
- }
-
- if gc.Isinter(nl.Type) {
- // front end should only leave cmp to literal nil
- if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
- gc.Yyerror("illegal interface comparison")
- break
- }
-
- a = optoas(a, gc.Types[gc.Tptr])
- var n1 gc.Node
- igen(nl, &n1, nil)
- n1.Type = gc.Types[gc.Tptr]
- var tmp gc.Node
- gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
- gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
- gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
- regfree(&n1)
- break
- }
-
- if gc.Iscomplex[nl.Type.Etype] {
- gc.Complexbool(a, nl, nr, true_, likely, to)
- break
- }
-
- var n2 gc.Node
- var n1 gc.Node
- if nr.Ullman >= gc.UINF {
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
-
- var tmp gc.Node
- gc.Tempname(&tmp, nl.Type)
- gmove(&n1, &tmp)
- regfree(&n1)
-
- regalloc(&n2, nr.Type, nil)
- cgen(nr, &n2)
-
- regalloc(&n1, nl.Type, nil)
- cgen(&tmp, &n1)
-
- goto cmp
- }
-
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
-
- if gc.Smallintconst(nr) {
- gins(optoas(gc.OCMP, nr.Type), &n1, nr)
- gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
- regfree(&n1)
- break
- }
-
- regalloc(&n2, nr.Type, nil)
- cgen(nr, &n2)
-
- // only < and <= work right with NaN; reverse if needed
- cmp:
- l := &n1
-
- r := &n2
- if gc.Isfloat[nl.Type.Etype] && (a == gc.OGT || a == gc.OGE) {
- l = &n2
- r = &n1
- a = gc.Brrev(a)
- }
-
- gins(optoas(gc.OCMP, nr.Type), l, r)
-
- if gc.Isfloat[nr.Type.Etype] && (n.Op == gc.OEQ || n.Op == gc.ONE) {
- if n.Op == gc.OEQ {
- // neither NE nor P
- p1 := gc.Gbranch(x86.AJNE, nil, -likely)
-
- p2 := gc.Gbranch(x86.AJPS, nil, -likely)
- gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
- gc.Patch(p1, gc.Pc)
- gc.Patch(p2, gc.Pc)
- } else {
- // either NE or P
- gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)
-
- gc.Patch(gc.Gbranch(x86.AJPS, nil, likely), to)
- }
- } else {
- gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
- }
- regfree(&n1)
- regfree(&n2)
- }
-
- return
-
-def:
- var n1 gc.Node
- regalloc(&n1, n.Type, nil)
- cgen(n, &n1)
- var n2 gc.Node
- gc.Nodconst(&n2, n.Type, 0)
- gins(optoas(gc.OCMP, n.Type), &n1, &n2)
- a := x86.AJNE
- if !true_ {
- a = x86.AJEQ
- }
- gc.Patch(gc.Gbranch(a, n.Type, likely), to)
- regfree(&n1)
- return
-}
-
-/*
- * n is on stack, either local variable
- * or return value from function call.
- * return n's offset from SP.
- */
-func stkof(n *gc.Node) int64 {
- switch n.Op {
- case gc.OINDREG:
- return n.Xoffset
-
- case gc.ODOT:
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- break
- }
- off := stkof(n.Left)
- if off == -1000 || off == 1000 {
- return off
- }
- return off + n.Xoffset
-
- case gc.OINDEX:
- t := n.Left.Type
- if !gc.Isfixedarray(t) {
- break
- }
- off := stkof(n.Left)
- if off == -1000 || off == 1000 {
- return off
- }
- if gc.Isconst(n.Right, gc.CTINT) {
- return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
- }
- return 1000
-
- case gc.OCALLMETH,
- gc.OCALLINTER,
- gc.OCALLFUNC:
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- t = t.Type
- }
-
- var flist gc.Iter
- t = gc.Structfirst(&flist, gc.Getoutarg(t))
- if t != nil {
- return t.Width
- }
- }
-
- // botch - probably failing to recognize address
- // arithmetic on the above. eg INDEX and DOT
- return -1000
-}
-
-/*
- * block copy:
- * memmove(&ns, &n, w);
- */
-func sgen(n *gc.Node, ns *gc.Node, w int64) {
- if gc.Debug['g'] != 0 {
- fmt.Printf("\nsgen w=%d\n", w)
- gc.Dump("r", n)
- gc.Dump("res", ns)
- }
-
- if n.Ullman >= gc.UINF && ns.Ullman >= gc.UINF {
- gc.Fatal("sgen UINF")
- }
-
- if w < 0 {
- gc.Fatal("sgen copy %d", w)
- }
-
- // If copying .args, that's all the results, so record definition sites
- // for them for the liveness analysis.
- if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
- for l := gc.Curfn.Dcl; l != nil; l = l.Next {
- if l.N.Class == gc.PPARAMOUT {
- gc.Gvardef(l.N)
- }
- }
- }
-
- // Avoid taking the address for simple enough types.
- if gc.Componentgen(n, ns) {
- return
- }
-
- if w == 0 {
- // evaluate side effects only
- var nodr gc.Node
- regalloc(&nodr, gc.Types[gc.Tptr], nil)
-
- agen(ns, &nodr)
- agen(n, &nodr)
- regfree(&nodr)
- return
- }
-
- // offset on the stack
- osrc := stkof(n)
-
- odst := stkof(ns)
-
- if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
- // osrc and odst both on stack, and at least one is in
- // an unknown position. Could generate code to test
- // for forward/backward copy, but instead just copy
- // to a temporary location first.
- var tmp gc.Node
- gc.Tempname(&tmp, n.Type)
-
- sgen(n, &tmp, w)
- sgen(&tmp, ns, w)
- return
- }
-
+func stackcopy(n, ns *gc.Node, osrc, odst, w int64) {
var noddi gc.Node
gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
var nodsi gc.Node
var nodl gc.Node
var nodr gc.Node
if n.Ullman >= ns.Ullman {
- agenr(n, &nodr, &nodsi)
+ gc.Agenr(n, &nodr, &nodsi)
if ns.Op == gc.ONAME {
gc.Gvardef(ns)
}
- agenr(ns, &nodl, &noddi)
+ gc.Agenr(ns, &nodl, &noddi)
} else {
if ns.Op == gc.ONAME {
gc.Gvardef(ns)
}
- agenr(ns, &nodl, &noddi)
- agenr(n, &nodr, &nodsi)
+ gc.Agenr(ns, &nodl, &noddi)
+ gc.Agenr(n, &nodr, &nodsi)
}
if nodl.Val.U.Reg != x86.REG_DI {
if nodr.Val.U.Reg != x86.REG_SI {
gmove(&nodr, &nodsi)
}
- regfree(&nodl)
- regfree(&nodr)
+ gc.Regfree(&nodl)
+ gc.Regfree(&nodr)
c := w % 8 // bytes
q := w / 8 // quads
}
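// NOTE: DI and SI are not arbitrary choices: the elided body presumably
// copies the quads with x86 string moves (REP MOVSQ), which implicitly read
// from SI and write through DI, hence the careful gmove shuffle above when
// agenr happened to pick other registers.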
func main() {
+ if obj.Getgoos() == "nacl" {
+ resvd = append(resvd, x86.REG_BP, x86.REG_SI)
+ } else if obj.Framepointer_enabled != 0 {
+ resvd = append(resvd, x86.REG_BP)
+ }
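// NOTE: the deleted x86 ginit later in this diff used to bump these
// reservations by hand ("BP is part of the calling convention of
// framepointer_enabled"); appending to resvd here lets the portable
// allocator enforce them instead.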
+
gc.Thearch.Thechar = thechar
gc.Thearch.Thestring = thestring
gc.Thearch.Thelinkarch = thelinkarch
gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = x86.REGSP
gc.Thearch.REGCTXT = x86.REGCTXT
+ gc.Thearch.REGCALLX = x86.REG_BX
+ gc.Thearch.REGCALLX2 = x86.REG_AX
+ gc.Thearch.REGRETURN = x86.REG_AX
+ gc.Thearch.REGMIN = x86.REG_AX
+ gc.Thearch.REGMAX = x86.REG_R15
+ gc.Thearch.FREGMIN = x86.REG_X0
+ gc.Thearch.FREGMAX = x86.REG_X15
gc.Thearch.MAXWIDTH = MAXWIDTH
- gc.Thearch.Anyregalloc = anyregalloc
+ gc.Thearch.ReservedRegs = resvd
+
+ gc.Thearch.AddIndex = addindex
gc.Thearch.Betypeinit = betypeinit
- gc.Thearch.Bgen = bgen
- gc.Thearch.Cgen = cgen
- gc.Thearch.Cgen_call = cgen_call
- gc.Thearch.Cgen_callinter = cgen_callinter
- gc.Thearch.Cgen_ret = cgen_ret
+ gc.Thearch.Cgen_bmul = cgen_bmul
+ gc.Thearch.Cgen_hmul = cgen_hmul
+ gc.Thearch.Cgen_shift = cgen_shift
gc.Thearch.Clearfat = clearfat
gc.Thearch.Defframe = defframe
+ gc.Thearch.Dodiv = dodiv
gc.Thearch.Excise = excise
gc.Thearch.Expandchecks = expandchecks
- gc.Thearch.Gclean = gclean
- gc.Thearch.Ginit = ginit
gc.Thearch.Gins = gins
- gc.Thearch.Ginscall = ginscall
+ gc.Thearch.Ginscon = ginscon
+ gc.Thearch.Ginsnop = ginsnop
gc.Thearch.Gmove = gmove
- gc.Thearch.Igen = igen
gc.Thearch.Linkarchinit = linkarchinit
gc.Thearch.Peep = peep
gc.Thearch.Proginfo = proginfo
- gc.Thearch.Regalloc = regalloc
- gc.Thearch.Regfree = regfree
gc.Thearch.Regtyp = regtyp
gc.Thearch.Sameaddr = sameaddr
gc.Thearch.Smallindir = smallindir
gc.Thearch.Stackaddr = stackaddr
+ gc.Thearch.Stackcopy = stackcopy
+ gc.Thearch.Sudoaddable = sudoaddable
+ gc.Thearch.Sudoclean = sudoclean
gc.Thearch.Excludedregs = excludedregs
gc.Thearch.RtoB = RtoB
gc.Thearch.FtoB = FtoB
return q
}
-/*
- * generate:
- * call f
- * proc=-1 normal call but no return
- * proc=0 normal call
- * proc=1 goroutine run in new proc
- * proc=2 defer call save away stack
- * proc=3 normal call to C pointer (not Go func value)
-*/
-func ginscall(f *gc.Node, proc int) {
- if f.Type != nil {
- extra := int32(0)
- if proc == 1 || proc == 2 {
- extra = 2 * int32(gc.Widthptr)
- }
- gc.Setmaxarg(f.Type, extra)
- }
-
- switch proc {
- default:
- gc.Fatal("ginscall: bad proc %d", proc)
-
- case 0, // normal call
- -1: // normal call but no return
- if f.Op == gc.ONAME && f.Class == gc.PFUNC {
- if f == gc.Deferreturn {
- // Deferred calls will appear to be returning to
- // the CALL deferreturn(SB) that we are about to emit.
- // However, the stack trace code will show the line
- // of the instruction byte before the return PC.
- // To avoid that being an unrelated instruction,
- // insert an x86 NOP that we will have the right line number.
- // x86 NOP 0x90 is really XCHG AX, AX; use that description
- // because the NOP pseudo-instruction would be removed by
- // the linker.
- var reg gc.Node
- gc.Nodreg(®, gc.Types[gc.TINT], x86.REG_AX)
-
- gins(x86.AXCHGL, ®, ®)
- }
-
- p := gins(obj.ACALL, nil, f)
- gc.Afunclit(&p.To, f)
- if proc == -1 || gc.Noreturn(p) {
- gins(obj.AUNDEF, nil, nil)
- }
- break
- }
-
- var reg gc.Node
- gc.Nodreg(®, gc.Types[gc.Tptr], x86.REG_DX)
- var r1 gc.Node
- gc.Nodreg(&r1, gc.Types[gc.Tptr], x86.REG_BX)
- gmove(f, ®)
- reg.Op = gc.OINDREG
- gmove(®, &r1)
- reg.Op = gc.OREGISTER
- gins(obj.ACALL, ®, &r1)
-
- case 3: // normal call of c function pointer
- gins(obj.ACALL, nil, f)
-
- case 1, // call in new proc (go)
- 2: // deferred call (defer)
- var stk gc.Node
-
- stk.Op = gc.OINDREG
- stk.Val.U.Reg = x86.REG_SP
- stk.Xoffset = 0
-
- var reg gc.Node
- if gc.Widthptr == 8 {
- // size of arguments at 0(SP)
- ginscon(x86.AMOVQ, int64(gc.Argsize(f.Type)), &stk)
-
- // FuncVal* at 8(SP)
- stk.Xoffset = int64(gc.Widthptr)
-
- gc.Nodreg(®, gc.Types[gc.TINT64], x86.REG_AX)
- gmove(f, ®)
- gins(x86.AMOVQ, ®, &stk)
- } else {
- // size of arguments at 0(SP)
- ginscon(x86.AMOVL, int64(gc.Argsize(f.Type)), &stk)
-
- // FuncVal* at 4(SP)
- stk.Xoffset = int64(gc.Widthptr)
-
- gc.Nodreg(®, gc.Types[gc.TINT32], x86.REG_AX)
- gmove(f, ®)
- gins(x86.AMOVL, ®, &stk)
- }
-
- if proc == 1 {
- ginscall(gc.Newproc, 0)
- } else {
- if gc.Hasdefer == 0 {
- gc.Fatal("hasdefer=0 but has defer")
- }
- ginscall(gc.Deferproc, 0)
- }
-
- if proc == 2 {
- gc.Nodreg(®, gc.Types[gc.TINT32], x86.REG_AX)
- gins(x86.ATESTL, ®, ®)
- p := gc.Gbranch(x86.AJEQ, nil, +1)
- cgen_ret(nil)
- gc.Patch(p, gc.Pc)
- }
- }
-}
-
-/*
- * n is call to interface method.
- * generate res = n.
- */
-func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
- i := n.Left
- if i.Op != gc.ODOTINTER {
- gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
- }
-
- f := i.Right // field
- if f.Op != gc.ONAME {
- gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
- }
-
- i = i.Left // interface
-
- if i.Addable == 0 {
- var tmpi gc.Node
- gc.Tempname(&tmpi, i.Type)
- cgen(i, &tmpi)
- i = &tmpi
- }
-
- gc.Genlist(n.List) // assign the args
-
- // i is now addable, prepare an indirected
- // register to hold its address.
- var nodi gc.Node
- igen(i, &nodi, res) // REG = &inter
-
- var nodsp gc.Node
- gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], x86.REG_SP)
-
- nodsp.Xoffset = 0
- if proc != 0 {
- nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
- }
- nodi.Type = gc.Types[gc.Tptr]
- nodi.Xoffset += int64(gc.Widthptr)
- cgen(&nodi, &nodsp) // {0, 8(nacl), or 16}(SP) = 8(REG) -- i.data
-
- var nodo gc.Node
- regalloc(&nodo, gc.Types[gc.Tptr], res)
-
- nodi.Type = gc.Types[gc.Tptr]
- nodi.Xoffset -= int64(gc.Widthptr)
- cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
- regfree(&nodi)
-
- var nodr gc.Node
- regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
- if n.Left.Xoffset == gc.BADWIDTH {
- gc.Fatal("cgen_callinter: badwidth")
- }
- gc.Cgen_checknil(&nodo) // in case offset is huge
- nodo.Op = gc.OINDREG
- nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
- if proc == 0 {
- // plain call: use direct c function pointer - more efficient
- cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
- proc = 3
- } else {
- // go/defer. generate go func value.
- gins(x86.ALEAQ, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
- }
-
- nodr.Type = n.Left.Type
- ginscall(&nodr, proc)
-
- regfree(&nodr)
- regfree(&nodo)
-}
-
-/*
- * generate function call;
- * proc=0 normal call
- * proc=1 goroutine run in new proc
- * proc=2 defer call save away stack
- */
-func cgen_call(n *gc.Node, proc int) {
- if n == nil {
- return
- }
-
- var afun gc.Node
- if n.Left.Ullman >= gc.UINF {
- // if name involves a fn call
- // precompute the address of the fn
- gc.Tempname(&afun, gc.Types[gc.Tptr])
-
- cgen(n.Left, &afun)
- }
-
- gc.Genlist(n.List) // assign the args
- t := n.Left.Type
-
- // call tempname pointer
- if n.Left.Ullman >= gc.UINF {
- var nod gc.Node
- regalloc(&nod, gc.Types[gc.Tptr], nil)
- gc.Cgen_as(&nod, &afun)
- nod.Type = t
- ginscall(&nod, proc)
- regfree(&nod)
- return
- }
-
- // call pointer
- if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
- var nod gc.Node
- regalloc(&nod, gc.Types[gc.Tptr], nil)
- gc.Cgen_as(&nod, n.Left)
- nod.Type = t
- ginscall(&nod, proc)
- regfree(&nod)
- return
- }
-
- // call direct
- n.Left.Method = 1
-
- ginscall(n.Left, proc)
-}
-
-/*
- * call to n has already been generated.
- * generate:
- * res = return value from call.
- */
-func cgen_callret(n *gc.Node, res *gc.Node) {
- t := n.Left.Type
- if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
- t = t.Type
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(t))
- if fp == nil {
- gc.Fatal("cgen_callret: nil")
- }
-
- var nod gc.Node
- nod.Op = gc.OINDREG
- nod.Val.U.Reg = x86.REG_SP
- nod.Addable = 1
-
- nod.Xoffset = fp.Width
- nod.Type = fp.Type
- gc.Cgen_as(res, &nod)
-}
-
-/*
- * call to n has already been generated.
- * generate:
- * res = &return value from call.
- */
-func cgen_aret(n *gc.Node, res *gc.Node) {
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- t = t.Type
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(t))
- if fp == nil {
- gc.Fatal("cgen_aret: nil")
- }
-
- var nod1 gc.Node
- nod1.Op = gc.OINDREG
- nod1.Val.U.Reg = x86.REG_SP
- nod1.Addable = 1
-
- nod1.Xoffset = fp.Width
- nod1.Type = fp.Type
-
- if res.Op != gc.OREGISTER {
- var nod2 gc.Node
- regalloc(&nod2, gc.Types[gc.Tptr], res)
- gins(leaptr, &nod1, &nod2)
- gins(movptr, &nod2, res)
- regfree(&nod2)
- } else {
- gins(leaptr, &nod1, res)
- }
-}
-
-/*
- * generate return.
- * n->left is assignments to return values.
- */
-func cgen_ret(n *gc.Node) {
- if n != nil {
- gc.Genlist(n.List) // copy out args
- }
- if gc.Hasdefer != 0 {
- ginscall(gc.Deferreturn, 0)
- }
- gc.Genlist(gc.Curfn.Exit)
- p := gins(obj.ARET, nil, nil)
- if n != nil && n.Op == gc.ORETJMP {
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Linksym(n.Left.Sym)
- }
-}
-
/*
* generate division.
* generates one of:
a := optoas(op, t)
var n3 gc.Node
- regalloc(&n3, t0, nil)
+ gc.Regalloc(&n3, t0, nil)
var ax gc.Node
var oldax gc.Node
if nl.Ullman >= nr.Ullman {
savex(x86.REG_AX, &ax, &oldax, res, t0)
- cgen(nl, &ax)
- regalloc(&ax, t0, &ax) // mark ax live during cgen
- cgen(nr, &n3)
- regfree(&ax)
+ gc.Cgen(nl, &ax)
+ gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
+ gc.Cgen(nr, &n3)
+ gc.Regfree(&ax)
} else {
- cgen(nr, &n3)
+ gc.Cgen(nr, &n3)
savex(x86.REG_AX, &ax, &oldax, res, t0)
- cgen(nl, &ax)
+ gc.Cgen(nl, &ax)
}
if t != t0 {
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
- ginscall(panicdiv, -1)
+ gc.Ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
}
gins(optoas(gc.OEXTEND, t), nil, nil)
}
gins(a, &n3, nil)
- regfree(&n3)
+ gc.Regfree(&n3)
if op == gc.ODIV {
gmove(&ax, res)
} else {
gc.Nodreg(x, t, dr)
if r > 1 && !gc.Samereg(x, res) {
- regalloc(oldx, gc.Types[gc.TINT64], nil)
+ gc.Regalloc(oldx, gc.Types[gc.TINT64], nil)
x.Type = gc.Types[gc.TINT64]
gmove(x, oldx)
x.Type = t
x.Type = gc.Types[gc.TINT64]
reg[x.Val.U.Reg] = uint8(oldx.Ostk)
gmove(oldx, x)
- regfree(oldx)
- }
-}
-
-/*
- * generate division according to op, one of:
- * res = nl / nr
- * res = nl % nr
- */
-func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var w int
-
- if nr.Op != gc.OLITERAL {
- goto longdiv
- }
- w = int(nl.Type.Width * 8)
-
- // Front end handled 32-bit division. We only need to handle 64-bit.
- // try to do division by multiply by (2^w)/d
- // see hacker's delight chapter 10
- switch gc.Simtype[nl.Type.Etype] {
- default:
- goto longdiv
-
- case gc.TUINT64:
- var m gc.Magic
- m.W = w
- m.Ud = uint64(gc.Mpgetfix(nr.Val.U.Xval))
- gc.Umagic(&m)
- if m.Bad != 0 {
- break
- }
- if op == gc.OMOD {
- goto longmod
- }
-
- var n1 gc.Node
- cgenr(nl, &n1, nil)
- var n2 gc.Node
- gc.Nodconst(&n2, nl.Type, int64(m.Um))
- var n3 gc.Node
- regalloc(&n3, nl.Type, res)
- cgen_hmul(&n1, &n2, &n3)
-
- if m.Ua != 0 {
- // need to add numerator accounting for overflow
- gins(optoas(gc.OADD, nl.Type), &n1, &n3)
-
- gc.Nodconst(&n2, nl.Type, 1)
- gins(optoas(gc.ORROTC, nl.Type), &n2, &n3)
- gc.Nodconst(&n2, nl.Type, int64(m.S)-1)
- gins(optoas(gc.ORSH, nl.Type), &n2, &n3)
- } else {
- gc.Nodconst(&n2, nl.Type, int64(m.S))
- gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift dx
- }
-
- gmove(&n3, res)
- regfree(&n1)
- regfree(&n3)
- return
-
- case gc.TINT64:
- var m gc.Magic
- m.W = w
- m.Sd = gc.Mpgetfix(nr.Val.U.Xval)
- gc.Smagic(&m)
- if m.Bad != 0 {
- break
- }
- if op == gc.OMOD {
- goto longmod
- }
-
- var n1 gc.Node
- cgenr(nl, &n1, res)
- var n2 gc.Node
- gc.Nodconst(&n2, nl.Type, m.Sm)
- var n3 gc.Node
- regalloc(&n3, nl.Type, nil)
- cgen_hmul(&n1, &n2, &n3)
-
- if m.Sm < 0 {
- // need to add numerator
- gins(optoas(gc.OADD, nl.Type), &n1, &n3)
- }
-
- gc.Nodconst(&n2, nl.Type, int64(m.S))
- gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift n3
-
- gc.Nodconst(&n2, nl.Type, int64(w)-1)
-
- gins(optoas(gc.ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
- gins(optoas(gc.OSUB, nl.Type), &n1, &n3) // added
-
- if m.Sd < 0 {
- // this could probably be removed
- // by factoring it into the multiplier
- gins(optoas(gc.OMINUS, nl.Type), nil, &n3)
- }
-
- gmove(&n3, res)
- regfree(&n1)
- regfree(&n3)
- return
+ gc.Regfree(oldx)
}
-
- goto longdiv
-
- // division and mod using (slow) hardware instruction
-longdiv:
- dodiv(op, nl, nr, res)
-
- return
-
- // mod using formula A%B = A-(A/B*B) but
- // we know that there is a fast algorithm for A/B
-longmod:
- var n1 gc.Node
- regalloc(&n1, nl.Type, res)
-
- cgen(nl, &n1)
- var n2 gc.Node
- regalloc(&n2, nl.Type, nil)
- cgen_div(gc.ODIV, &n1, nr, &n2)
- a := optoas(gc.OMUL, nl.Type)
- if w == 8 {
- // use 2-operand 16-bit multiply
- // because there is no 2-operand 8-bit multiply
- a = x86.AIMULW
- }
-
- if !gc.Smallintconst(nr) {
- var n3 gc.Node
- regalloc(&n3, nl.Type, nil)
- cgen(nr, &n3)
- gins(a, &n3, &n2)
- regfree(&n3)
- } else {
- gins(a, nr, &n2)
- }
- gins(optoas(gc.OSUB, nl.Type), &n2, &n1)
- gmove(&n1, res)
- regfree(&n1)
- regfree(&n2)
}
/*
}
var n1 gc.Node
- cgenr(nl, &n1, res)
+ gc.Cgenr(nl, &n1, res)
var n2 gc.Node
- cgenr(nr, &n2, nil)
+ gc.Cgenr(nr, &n2, nil)
var ax gc.Node
gc.Nodreg(&ax, t, x86.REG_AX)
gmove(&n1, &ax)
gins(a, &n2, nil)
- regfree(&n2)
- regfree(&n1)
+ gc.Regfree(&n2)
+ gc.Regfree(&n1)
var dx gc.Node
if t.Width == 1 {
if nr.Op == gc.OLITERAL {
var n1 gc.Node
- regalloc(&n1, nl.Type, res)
- cgen(nl, &n1)
+ gc.Regalloc(&n1, nl.Type, res)
+ gc.Cgen(nl, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1
gins(a, nr, &n1)
}
gmove(&n1, res)
- regfree(&n1)
+ gc.Regfree(&n1)
return
}
if nl.Ullman >= gc.UINF {
var n4 gc.Node
gc.Tempname(&n4, nl.Type)
- cgen(nl, &n4)
+ gc.Cgen(nl, &n4)
nl = &n4
}
if nr.Ullman >= gc.UINF {
var n5 gc.Node
gc.Tempname(&n5, nr.Type)
- cgen(nr, &n5)
+ gc.Cgen(nr, &n5)
nr = &n5
}
tcount = gc.Types[gc.TUINT32]
}
- regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
+ gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
var n3 gc.Node
- regalloc(&n3, tcount, &n1) // to clear high bits of CX
+ gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
var oldcx gc.Node
if rcx > 0 && !gc.Samereg(&cx, res) {
- regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
+ gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
gmove(&cx, &oldcx)
}
var n2 gc.Node
if gc.Samereg(&cx, res) {
- regalloc(&n2, nl.Type, nil)
+ gc.Regalloc(&n2, nl.Type, nil)
} else {
- regalloc(&n2, nl.Type, res)
+ gc.Regalloc(&n2, nl.Type, res)
}
if nl.Ullman >= nr.Ullman {
- cgen(nl, &n2)
- cgen(nr, &n1)
+ gc.Cgen(nl, &n2)
+ gc.Cgen(nr, &n1)
gmove(&n1, &n3)
} else {
- cgen(nr, &n1)
+ gc.Cgen(nr, &n1)
gmove(&n1, &n3)
- cgen(nl, &n2)
+ gc.Cgen(nl, &n2)
}
- regfree(&n3)
+ gc.Regfree(&n3)
// test and fix up large shifts
if !bounded {
if oldcx.Op != 0 {
cx.Type = gc.Types[gc.TUINT64]
gmove(&oldcx, &cx)
- regfree(&oldcx)
+ gc.Regfree(&oldcx)
}
gmove(&n2, res)
- regfree(&n1)
- regfree(&n2)
+ gc.Regfree(&n1)
+ gc.Regfree(&n2)
}
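// NOTE: CX gets this special handling because variable shift counts on x86
// must live in CL; oldcx preserves a caller's CX when it is already in use,
// and n3 ("to clear high bits of CX") widens the count to tcount ahead of
// the large-shift bound check.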
/*
* there is no 2-operand byte multiply instruction so
* we do a full-width multiplication and truncate afterwards.
*/
-func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
+ if optoas(op, nl.Type) != x86.AIMULB {
+ return false
+ }
+
// largest ullman on left.
if nl.Ullman < nr.Ullman {
tmp := nl
// generate operands in "8-bit" registers.
var n1b gc.Node
- regalloc(&n1b, nl.Type, res)
+ gc.Regalloc(&n1b, nl.Type, res)
- cgen(nl, &n1b)
+ gc.Cgen(nl, &n1b)
var n2b gc.Node
- regalloc(&n2b, nr.Type, nil)
- cgen(nr, &n2b)
+ gc.Regalloc(&n2b, nr.Type, nil)
+ gc.Cgen(nr, &n2b)
// perform full-width multiplication.
t := gc.Types[gc.TUINT64]
// truncate.
gmove(&n1, res)
- regfree(&n1b)
- regfree(&n2b)
+ gc.Regfree(&n1b)
+ gc.Regfree(&n2b)
+ return true
}
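// NOTE: truncating after a full-width multiply is sound because the low 8
// bits of a product depend only on the low 8 bits of the operands:
// (a*b) mod 2^8 == ((a mod 2^8) * (b mod 2^8)) mod 2^8, so junk in the upper
// register bits cannot leak into the byte result.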
func clearfat(nl *gc.Node) {
// NOTE: Must use agen, not igen, so that optimizer sees address
// being taken. We are not writing on field boundaries.
var n1 gc.Node
- agenr(nl, &n1, nil)
+ gc.Agenr(nl, &n1, nil)
n1.Op = gc.OINDREG
var z gc.Node
n1.Xoffset++
}
- regfree(&n1)
+ gc.Regfree(&n1)
return
}
var oldn1 gc.Node
var n1 gc.Node
savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
- agen(nl, &n1)
+ gc.Agen(nl, &n1)
var ax gc.Node
var oldax gc.Node
p2.To.Offset = 0
}
}
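// NOTE: DI and AX are saved here presumably because the elided body zeroes
// the block with DI-relative string stores (REP STOSQ or duffzero), which
// advance DI implicitly and take the value to store from AX.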
+
+// addr += index*width if possible.
+func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
+ switch width {
+ case 1, 2, 4, 8:
+ p1 := gins(x86.ALEAQ, index, addr)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Scale = int16(width)
+ p1.From.Index = p1.From.Reg
+ p1.From.Reg = p1.To.Reg
+ return true
+ }
+ return false
+}
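// NOTE: after the field swap the LEAQ above reads roughly
// LEAQ (addr)(index*width), addr, computing addr + index*width in a single
// instruction; the deleted agenr earlier in this diff uses the same trick.
// Scales other than 1, 2, 4, and 8 are not encodable, hence the false.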
x86.REG_SP, // for stack
}
-func ginit() {
- for i := 0; i < len(reg); i++ {
- reg[i] = 1
- }
- for i := x86.REG_AX; i <= x86.REG_R15; i++ {
- reg[i] = 0
- }
- for i := x86.REG_X0; i <= x86.REG_X15; i++ {
- reg[i] = 0
- }
-
- for i := 0; i < len(resvd); i++ {
- reg[resvd[i]]++
- }
-
- if gc.Nacl {
- reg[x86.REG_BP]++
- reg[x86.REG_R15]++
- } else if obj.Framepointer_enabled != 0 {
- // BP is part of the calling convention of framepointer_enabled.
- reg[x86.REG_BP]++
- }
-}
-
-func gclean() {
- for i := 0; i < len(resvd); i++ {
- reg[resvd[i]]--
- }
- if gc.Nacl {
- reg[x86.REG_BP]--
- reg[x86.REG_R15]--
- } else if obj.Framepointer_enabled != 0 {
- reg[x86.REG_BP]--
- }
-
- for i := x86.REG_AX; i <= x86.REG_R15; i++ {
- if reg[i] != 0 {
- gc.Yyerror("reg %v left allocated\n", obj.Rconv(i))
- }
- }
- for i := x86.REG_X0; i <= x86.REG_X15; i++ {
- if reg[i] != 0 {
- gc.Yyerror("reg %v left allocated\n", obj.Rconv(i))
- }
- }
-}
-
-func anyregalloc() bool {
- var j int
-
- for i := x86.REG_AX; i <= x86.REG_R15; i++ {
- if reg[i] == 0 {
- goto ok
- }
- for j = 0; j < len(resvd); j++ {
- if resvd[j] == i {
- goto ok
- }
- }
- return true
- ok:
- }
-
- return false
-}
-
-var regpc [x86.REG_R15 + 1 - x86.REG_AX]uint32
-
-/*
- * allocate register of type t, leave in n.
- * if o != N, o is desired fixed register.
- * caller must regfree(n).
- */
-func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
- if t == nil {
- gc.Fatal("regalloc: t nil")
- }
- et := int(gc.Simtype[t.Etype])
-
- var i int
- switch et {
- case gc.TINT8,
- gc.TUINT8,
- gc.TINT16,
- gc.TUINT16,
- gc.TINT32,
- gc.TUINT32,
- gc.TINT64,
- gc.TUINT64,
- gc.TPTR32,
- gc.TPTR64,
- gc.TBOOL:
- if o != nil && o.Op == gc.OREGISTER {
- i = int(o.Val.U.Reg)
- if i >= x86.REG_AX && i <= x86.REG_R15 {
- goto out
- }
- }
-
- for i = x86.REG_AX; i <= x86.REG_R15; i++ {
- if reg[i] == 0 {
- regpc[i-x86.REG_AX] = uint32(obj.Getcallerpc(&n))
- goto out
- }
- }
-
- gc.Flusherrors()
- for i := 0; i+x86.REG_AX <= x86.REG_R15; i++ {
- fmt.Printf("%d %p\n", i, regpc[i])
- }
- gc.Fatal("out of fixed registers")
-
- case gc.TFLOAT32,
- gc.TFLOAT64:
- if o != nil && o.Op == gc.OREGISTER {
- i = int(o.Val.U.Reg)
- if i >= x86.REG_X0 && i <= x86.REG_X15 {
- goto out
- }
- }
-
- for i = x86.REG_X0; i <= x86.REG_X15; i++ {
- if reg[i] == 0 {
- goto out
- }
- }
- gc.Fatal("out of floating registers")
-
- case gc.TCOMPLEX64,
- gc.TCOMPLEX128:
- gc.Tempname(n, t)
- return
- }
-
- gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0))
- return
-
-out:
- reg[i]++
- gc.Nodreg(n, t, i)
-}
-
-func regfree(n *gc.Node) {
- if n.Op == gc.ONAME {
- return
- }
- if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
- gc.Fatal("regfree: not a register")
- }
- i := int(n.Val.U.Reg)
- if i == x86.REG_SP {
- return
- }
- if i < 0 || i >= len(reg) {
- gc.Fatal("regfree: reg out of range")
- }
- if reg[i] <= 0 {
- gc.Fatal("regfree: reg not allocated")
- }
- reg[i]--
- if reg[i] == 0 && x86.REG_AX <= i && i <= x86.REG_R15 {
- regpc[i-x86.REG_AX] = 0
- }
-}
-
/*
* generate
* as $c, reg
// cannot have 64-bit immediate in ADD, etc.
// instead, MOV into register first.
var ntmp gc.Node
- regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+ gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(x86.AMOVQ, &n1, &ntmp)
gins(as, &ntmp, n2)
- regfree(&ntmp)
+ gc.Regfree(&ntmp)
return
}
}
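// Editor's note: x86-64 ALU instructions sign-extend a 32-bit
// immediate; only MOVQ can carry a full 64-bit constant. Staging the
// constant through a scratch register with MOVQ, as above, is the
// standard workaround for ADD and friends.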
bignodes()
var r1 gc.Node
- regalloc(&r1, gc.Types[ft], nil)
+ gc.Regalloc(&r1, gc.Types[ft], nil)
var r2 gc.Node
- regalloc(&r2, gc.Types[tt], t)
+ gc.Regalloc(&r2, gc.Types[tt], t)
var r3 gc.Node
- regalloc(&r3, gc.Types[ft], nil)
+ gc.Regalloc(&r3, gc.Types[ft], nil)
var r4 gc.Node
- regalloc(&r4, gc.Types[tt], nil)
+ gc.Regalloc(&r4, gc.Types[tt], nil)
gins(optoas(gc.OAS, f.Type), f, &r1)
gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
gins(x86.AXORQ, &r4, &r2)
gc.Patch(p2, gc.Pc)
gmove(&r2, t)
- regfree(&r4)
- regfree(&r3)
- regfree(&r2)
- regfree(&r1)
+ gc.Regfree(&r4)
+ gc.Regfree(&r3)
+ gc.Regfree(&r2)
+ gc.Regfree(&r1)
return
/*
var one gc.Node
gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
var r1 gc.Node
- regalloc(&r1, f.Type, f)
+ gc.Regalloc(&r1, f.Type, f)
var r2 gc.Node
- regalloc(&r2, t.Type, t)
+ gc.Regalloc(&r2, t.Type, t)
var r3 gc.Node
- regalloc(&r3, f.Type, nil)
+ gc.Regalloc(&r3, f.Type, nil)
var r4 gc.Node
- regalloc(&r4, f.Type, nil)
+ gc.Regalloc(&r4, f.Type, nil)
gmove(f, &r1)
gins(x86.ACMPQ, &r1, &zero)
p1 := gc.Gbranch(x86.AJLT, nil, +1)
gins(optoas(gc.OADD, t.Type), &r2, &r2)
gc.Patch(p2, gc.Pc)
gmove(&r2, t)
- regfree(&r4)
- regfree(&r3)
- regfree(&r2)
- regfree(&r1)
+ gc.Regfree(&r4)
+ gc.Regfree(&r3)
+ gc.Regfree(&r2)
+ gc.Regfree(&r1)
return
/*
rdst:
{
var r1 gc.Node
- regalloc(&r1, t.Type, t)
+ gc.Regalloc(&r1, t.Type, t)
gins(a, f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
}
// requires register intermediate
hard:
var r1 gc.Node
- regalloc(&r1, cvt, t)
+ gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
}
// Node nod;
// if(f != N && f->op == OINDEX) {
- // regalloc(&nod, &regnode, Z);
+ // gc.Regalloc(&nod, &regnode, Z);
// v = constnode.vconst;
- // cgen(f->right, &nod);
+ // gc.Cgen(f->right, &nod);
// constnode.vconst = v;
// idx.reg = nod.reg;
- // regfree(&nod);
+ // gc.Regfree(&nod);
// }
// if(t != N && t->op == OINDEX) {
- // regalloc(&nod, &regnode, Z);
+ // gc.Regalloc(&nod, &regnode, Z);
// v = constnode.vconst;
- // cgen(t->right, &nod);
+ // gc.Cgen(t->right, &nod);
// constnode.vconst = v;
// idx.reg = nod.reg;
- // regfree(&nod);
+ // gc.Regfree(&nod);
// }
+ if f != nil && f.Op == gc.OADDR && (as == x86.AMOVL || as == x86.AMOVQ) {
+ // Turn MOVL $xxx into LEAL xxx.
+ // These should be equivalent but most of the backend
+ // only expects to see LEAL, because that's what we had
+ // historically generated. Various hidden assumptions are baked in by now.
+ if as == x86.AMOVL {
+ as = x86.ALEAL
+ } else {
+ as = x86.ALEAQ
+ }
+ f = f.Left
+ }
+
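// Editor's note: concretely, the rewrite above turns an instruction
// like MOVQ $x(SB), AX (load the address of symbol x as an
// immediate) into LEAQ x(SB), AX, which computes the same address.
// The symbol x is illustrative only.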
switch as {
case x86.AMOVB,
x86.AMOVW,
return p
}
-func fixlargeoffset(n *gc.Node) {
- if n == nil {
- return
- }
- if n.Op != gc.OINDREG {
- return
- }
- if n.Val.U.Reg == x86.REG_SP { // stack offset cannot be large
- return
- }
- if n.Xoffset != int64(int32(n.Xoffset)) {
- // offset too large, add to register instead.
- a := *n
-
- a.Op = gc.OREGISTER
- a.Type = gc.Types[gc.Tptr]
- a.Xoffset = 0
- gc.Cgen_checknil(&a)
- ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
- n.Xoffset = 0
- }
+func ginsnop() {
+ // This is actually not the x86 NOP anymore,
+ // but at the point where it gets used, AX is dead
+ // so it's okay if we lose the high bits.
+ var reg gc.Node
+ gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
+ gins(x86.AXCHGL, &reg, &reg)
}
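// Editor's note: the canonical x86 NOP (opcode 0x90) is historically
// an alias for XCHG AX, AX. Written out as XCHGL AX, AX, the
// assembler presumably emits a real 32-bit exchange, which in 64-bit
// mode zero-extends and so clears the upper half of RAX; hence the
// insistence above that AX be dead wherever this is used.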
/*
gc.ONE<<16 | gc.TFLOAT64:
a = x86.AJNE
+ case gc.OPS<<16 | gc.TBOOL,
+ gc.OPS<<16 | gc.TINT8,
+ gc.OPS<<16 | gc.TUINT8,
+ gc.OPS<<16 | gc.TINT16,
+ gc.OPS<<16 | gc.TUINT16,
+ gc.OPS<<16 | gc.TINT32,
+ gc.OPS<<16 | gc.TUINT32,
+ gc.OPS<<16 | gc.TINT64,
+ gc.OPS<<16 | gc.TUINT64,
+ gc.OPS<<16 | gc.TPTR32,
+ gc.OPS<<16 | gc.TPTR64,
+ gc.OPS<<16 | gc.TFLOAT32,
+ gc.OPS<<16 | gc.TFLOAT64:
+ a = x86.AJPS
+
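	// Editor's note: OPS is the "parity set" pseudo-condition. x86
	// floating-point compares set the parity flag when the comparison
	// is unordered, i.e. when an operand is NaN, so branching on JPS
	// is how the NaN case is routed when lowering float == and !=.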
case gc.OLT<<16 | gc.TINT8,
gc.OLT<<16 | gc.TINT16,
gc.OLT<<16 | gc.TINT32,
var cleani int = 0
-func xgen(n *gc.Node, a *gc.Node, o int) bool {
- regalloc(a, gc.Types[gc.Tptr], nil)
-
- if o&ODynam != 0 {
- if n.Addable != 0 {
- if n.Op != gc.OINDREG {
- if n.Op != gc.OREGISTER {
- return true
- }
- }
- }
- }
-
- agen(n, a)
- return false
-}
-
func sudoclean() {
if clean[cleani-1].Op != gc.OEMPTY {
- regfree(&clean[cleani-1])
+ gc.Regfree(&clean[cleani-1])
}
if clean[cleani-2].Op != gc.OEMPTY {
- regfree(&clean[cleani-2])
+ gc.Regfree(&clean[cleani-2])
}
cleani -= 2
}
return true
}
- regalloc(reg, gc.Types[gc.Tptr], nil)
+ gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
n1 := *reg
n1.Op = gc.OINDREG
if oary[0] >= 0 {
- agen(nn, reg)
+ gc.Agen(nn, reg)
n1.Xoffset = oary[0]
} else {
- cgen(nn, reg)
+ gc.Cgen(nn, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[0] + 1)
}
a.Type = obj.TYPE_NONE
a.Index = obj.TYPE_NONE
- fixlargeoffset(&n1)
+ gc.Fixlargeoffset(&n1)
gc.Naddr(a, &n1)
return true
"cmd/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
- "fmt"
)
-/*
- * peep.c
- */
-/*
- * generate:
- * res = n;
- * simplifies and calls gmove.
- */
-func cgen(n *gc.Node, res *gc.Node) {
- //print("cgen %N(%d) -> %N(%d)\n", n, n->addable, res, res->addable);
- if gc.Debug['g'] != 0 {
- gc.Dump("\ncgen-n", n)
- gc.Dump("cgen-res", res)
- }
-
- if n == nil || n.Type == nil {
- return
- }
-
- if res == nil || res.Type == nil {
- gc.Fatal("cgen: res nil")
- }
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- }
-
- switch n.Op {
- case gc.OSLICE,
- gc.OSLICEARR,
- gc.OSLICESTR,
- gc.OSLICE3,
- gc.OSLICE3ARR:
- if res.Op != gc.ONAME || res.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_slice(n, &n1)
- cgen(&n1, res)
- } else {
- gc.Cgen_slice(n, res)
- }
- return
-
- case gc.OEFACE:
- if res.Op != gc.ONAME || res.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_eface(n, &n1)
- cgen(&n1, res)
- } else {
- gc.Cgen_eface(n, res)
- }
- return
- }
-
- if n.Ullman >= gc.UINF {
- if n.Op == gc.OINDREG {
- gc.Fatal("cgen: this is going to misscompile")
- }
- if res.Ullman >= gc.UINF {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- cgen(n, &n1)
- cgen(&n1, res)
- return
- }
- }
-
- if gc.Isfat(n.Type) {
- if n.Type.Width < 0 {
- gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
- }
- sgen(n, res, n.Type.Width)
- return
- }
-
- if res.Addable == 0 {
- if n.Ullman > res.Ullman {
- var n1 gc.Node
- regalloc(&n1, n.Type, res)
- cgen(n, &n1)
- if n1.Ullman > res.Ullman {
- gc.Dump("n1", &n1)
- gc.Dump("res", res)
- gc.Fatal("loop in cgen")
- }
-
- cgen(&n1, res)
- regfree(&n1)
- return
- }
-
- var f int
- if res.Ullman >= gc.UINF {
- goto gen
- }
-
- if gc.Complexop(n, res) {
- gc.Complexgen(n, res)
- return
- }
-
- f = 1 // gen thru register
- switch n.Op {
- case gc.OLITERAL:
- if gc.Smallintconst(n) {
- f = 0
- }
-
- case gc.OREGISTER:
- f = 0
- }
-
- if !gc.Iscomplex[n.Type.Etype] {
- a := optoas(gc.OAS, res.Type)
- var addr obj.Addr
- if sudoaddable(a, res, &addr) {
- var p1 *obj.Prog
- if f != 0 {
- var n2 gc.Node
- regalloc(&n2, res.Type, nil)
- cgen(n, &n2)
- p1 = gins(a, &n2, nil)
- regfree(&n2)
- } else {
- p1 = gins(a, n, nil)
- }
- p1.To = addr
- if gc.Debug['g'] != 0 {
- fmt.Printf("%v [ignore previous line]\n", p1)
- }
- sudoclean()
- return
- }
- }
-
- gen:
- var n1 gc.Node
- igen(res, &n1, nil)
- cgen(n, &n1)
- regfree(&n1)
- return
- }
-
- // update addressability for string, slice
- // can't do in walk because n->left->addable
- // changes if n->left is an escaping local variable.
- switch n.Op {
- case gc.OSPTR,
- gc.OLEN:
- if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
- n.Addable = n.Left.Addable
- }
-
- case gc.OCAP:
- if gc.Isslice(n.Left.Type) {
- n.Addable = n.Left.Addable
- }
-
- case gc.OITAB:
- n.Addable = n.Left.Addable
- }
-
- if gc.Complexop(n, res) {
- gc.Complexgen(n, res)
- return
- }
-
- // if both are addressable, move
- if n.Addable != 0 {
- if n.Op == gc.OREGISTER || res.Op == gc.OREGISTER {
- gmove(n, res)
- } else {
- var n1 gc.Node
- regalloc(&n1, n.Type, nil)
- gmove(n, &n1)
- cgen(&n1, res)
- regfree(&n1)
- }
-
- return
- }
-
- nl := n.Left
- nr := n.Right
-
- if nl != nil && nl.Ullman >= gc.UINF {
- if nr != nil && nr.Ullman >= gc.UINF {
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
- cgen(nl, &n1)
- n2 := *n
- n2.Left = &n1
- cgen(&n2, res)
- return
- }
- }
-
- if !gc.Iscomplex[n.Type.Etype] {
- a := optoas(gc.OAS, n.Type)
- var addr obj.Addr
- if sudoaddable(a, n, &addr) {
- if res.Op == gc.OREGISTER {
- p1 := gins(a, nil, res)
- p1.From = addr
- } else {
- var n2 gc.Node
- regalloc(&n2, n.Type, nil)
- p1 := gins(a, nil, &n2)
- p1.From = addr
- gins(a, &n2, res)
- regfree(&n2)
- }
-
- sudoclean()
- return
- }
- }
-
- // TODO(minux): we shouldn't reverse FP comparisons, but then we need to synthesize
- // OGE, OLE, and ONE ourselves.
- // if(nl != N && isfloat[n->type->etype] && isfloat[nl->type->etype]) goto flt;
-
- var a int
- switch n.Op {
- default:
- gc.Dump("cgen", n)
- gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
-
- // these call bgen to get a bool value
- case gc.OOROR,
- gc.OANDAND,
- gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OLE,
- gc.OGE,
- gc.OGT,
- gc.ONOT:
- p1 := gc.Gbranch(arm64.AB, nil, 0)
-
- p2 := gc.Pc
- gmove(gc.Nodbool(true), res)
- p3 := gc.Gbranch(arm64.AB, nil, 0)
- gc.Patch(p1, gc.Pc)
- bgen(n, true, 0, p2)
- gmove(gc.Nodbool(false), res)
- gc.Patch(p3, gc.Pc)
- return
-
- case gc.OPLUS:
- cgen(nl, res)
- return
-
- // unary
- case gc.OCOM:
- a := optoas(gc.OXOR, nl.Type)
-
- var n1 gc.Node
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
- var n2 gc.Node
- gc.Nodconst(&n2, nl.Type, -1)
- gins(a, &n2, &n1)
- gmove(&n1, res)
- regfree(&n1)
- return
-
- case gc.OMINUS:
- var n1 gc.Node
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
- gins(optoas(gc.OMINUS, nl.Type), &n1, &n1)
- gmove(&n1, res)
- regfree(&n1)
- return
-
- // symmetric binary
- case gc.OAND,
- gc.OOR,
- gc.OXOR,
- gc.OADD,
- gc.OMUL:
- a = optoas(int(n.Op), nl.Type)
-
- goto sbop
-
- // asymmetric binary
- case gc.OSUB:
- a = optoas(int(n.Op), nl.Type)
-
- goto abop
-
- case gc.OHMUL:
- cgen_hmul(nl, nr, res)
-
- case gc.OCONV:
- if n.Type.Width > nl.Type.Width {
- // If loading from memory, do conversion during load,
- // so as to avoid use of 8-bit register in, say, int(*byteptr).
- switch nl.Op {
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OINDEX,
- gc.OIND,
- gc.ONAME:
- var n1 gc.Node
- igen(nl, &n1, res)
- var n2 gc.Node
- regalloc(&n2, n.Type, res)
- gmove(&n1, &n2)
- gmove(&n2, res)
- regfree(&n2)
- regfree(&n1)
- return
- }
- }
-
- var n1 gc.Node
- regalloc(&n1, nl.Type, res)
- var n2 gc.Node
- regalloc(&n2, n.Type, &n1)
- cgen(nl, &n1)
-
- // if we do the conversion n1 -> n2 here
- // reusing the register, then gmove won't
- // have to allocate its own register.
- gmove(&n1, &n2)
-
- gmove(&n2, res)
- regfree(&n2)
- regfree(&n1)
-
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OINDEX,
- gc.OIND,
- gc.ONAME: // PHEAP or PPARAMREF var
- var n1 gc.Node
- igen(n, &n1, res)
-
- gmove(&n1, res)
- regfree(&n1)
-
- // interface table is first word of interface value
- case gc.OITAB:
- var n1 gc.Node
- igen(nl, &n1, res)
-
- n1.Type = n.Type
- gmove(&n1, res)
- regfree(&n1)
-
- // pointer is the first word of string or slice.
- case gc.OSPTR:
- if gc.Isconst(nl, gc.CTSTR) {
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
- p1 := gins(arm64.AMOVD, nil, &n1)
- gc.Datastring(nl.Val.U.Sval, &p1.From)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- var n1 gc.Node
- igen(nl, &n1, res)
- n1.Type = n.Type
- gmove(&n1, res)
- regfree(&n1)
-
- case gc.OLEN:
- if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
- // map and chan have len in the first int-sized word.
- // a zero pointer means zero length
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
-
- cgen(nl, &n1)
-
- var n2 gc.Node
- gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
- gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
-
- n2 = n1
- n2.Op = gc.OINDREG
- n2.Type = gc.Types[gc.Simtype[gc.TINT]]
- gmove(&n2, &n1)
-
- gc.Patch(p1, gc.Pc)
-
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
- // both slice and string have len one pointer into the struct.
- // a zero pointer means zero length
- var n1 gc.Node
- igen(nl, &n1, res)
-
- n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
- n1.Xoffset += int64(gc.Array_nel)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
-
- case gc.OCAP:
- if gc.Istype(nl.Type, gc.TCHAN) {
- // chan has cap in the second int-sized word.
- // a zero pointer means zero length
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
-
- cgen(nl, &n1)
-
- var n2 gc.Node
- gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
- gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
-
- n2 = n1
- n2.Op = gc.OINDREG
- n2.Xoffset = int64(gc.Widthint)
- n2.Type = gc.Types[gc.Simtype[gc.TINT]]
- gmove(&n2, &n1)
-
- gc.Patch(p1, gc.Pc)
-
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- if gc.Isslice(nl.Type) {
- var n1 gc.Node
- igen(nl, &n1, res)
- n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
- n1.Xoffset += int64(gc.Array_cap)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
-
- case gc.OADDR:
- if n.Bounded { // let race detector avoid nil checks
- gc.Disable_checknil++
- }
- agen(nl, res)
- if n.Bounded {
- gc.Disable_checknil--
- }
-
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
- cgen_callret(n, res)
-
- case gc.OCALLINTER:
- cgen_callinter(n, res, 0)
- cgen_callret(n, res)
-
- case gc.OCALLFUNC:
- cgen_call(n, 0)
- cgen_callret(n, res)
-
- case gc.OMOD,
- gc.ODIV:
- if gc.Isfloat[n.Type.Etype] {
- a = optoas(int(n.Op), nl.Type)
- goto abop
- }
-
- if nl.Ullman >= nr.Ullman {
- var n1 gc.Node
- regalloc(&n1, nl.Type, res)
- cgen(nl, &n1)
- cgen_div(int(n.Op), &n1, nr, res)
- regfree(&n1)
- } else {
- var n2 gc.Node
- if !gc.Smallintconst(nr) {
- regalloc(&n2, nr.Type, res)
- cgen(nr, &n2)
- } else {
- n2 = *nr
- }
-
- cgen_div(int(n.Op), nl, &n2, res)
- if n2.Op != gc.OLITERAL {
- regfree(&n2)
- }
- }
-
- case gc.OLSH,
- gc.ORSH,
- gc.OLROT:
- cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
- }
-
- return
-
- /*
- * put simplest on right - we'll generate into left
- * and then adjust it using the computation of right.
- * constants and variables have the same ullman
- * count, so look for constants specially.
- *
- * an integer constant we can use as an immediate
- * is simpler than a variable - we can use the immediate
- * in the adjustment instruction directly - so it goes
- * on the right.
- *
- * other constants, like big integers or floating point
- * constants, require a mov into a register, so those
- * might as well go on the left, so we can reuse that
- * register for the computation.
- */
-sbop: // symmetric binary
- if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
- r := nl
- nl = nr
- nr = r
- }
-
-abop: // asymmetric binary
- var n1 gc.Node
- var n2 gc.Node
- if nl.Ullman >= nr.Ullman {
- regalloc(&n1, nl.Type, res)
- cgen(nl, &n1)
-
- /*
- * This generates smaller code - it avoids a MOV - but it's
- * easily 10% slower due to not being able to
- * optimize/manipulate the move.
- * To see, run: go test -bench . crypto/md5
- * with and without.
- *
- if(sudoaddable(a, nr, &addr)) {
- p1 = gins(a, N, &n1);
- p1->from = addr;
- gmove(&n1, res);
- sudoclean();
- regfree(&n1);
- goto ret;
- }
- *
- */
- // TODO(minux): enable using constants directly in certain instructions.
- //if(smallintconst(nr))
- // n2 = *nr;
- //else {
- regalloc(&n2, nr.Type, nil)
-
- cgen(nr, &n2)
- } else //}
- {
- //if(smallintconst(nr))
- // n2 = *nr;
- //else {
- regalloc(&n2, nr.Type, res)
-
- cgen(nr, &n2)
-
- //}
- regalloc(&n1, nl.Type, nil)
-
- cgen(nl, &n1)
- }
-
- gins(a, &n2, &n1)
-
- // Normalize result for types smaller than word.
- if n.Type.Width < int64(gc.Widthreg) {
- switch n.Op {
- case gc.OADD,
- gc.OSUB,
- gc.OMUL,
- gc.OLSH:
- gins(optoas(gc.OAS, n.Type), &n1, &n1)
- }
- }
-
- gmove(&n1, res)
- regfree(&n1)
- if n2.Op != gc.OLITERAL {
- regfree(&n2)
- }
- return
-}
-
-/*
- * allocate a register (reusing res if possible) and generate
- * a = n
- * The caller must call regfree(a).
- */
-func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("cgenr-n", n)
- }
-
- if gc.Isfat(n.Type) {
- gc.Fatal("cgenr on fat node")
- }
-
- if n.Addable != 0 {
- regalloc(a, n.Type, res)
- gmove(n, a)
- return
- }
-
- switch n.Op {
- case gc.ONAME,
- gc.ODOT,
- gc.ODOTPTR,
- gc.OINDEX,
- gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- var n1 gc.Node
- igen(n, &n1, res)
- regalloc(a, gc.Types[gc.Tptr], &n1)
- gmove(&n1, a)
- regfree(&n1)
-
- default:
- regalloc(a, n.Type, res)
- cgen(n, a)
- }
-}
-
-/*
- * allocate a register (reusing res if possible) and generate
- * a = &n
- * The caller must call regfree(a).
- * The generated code checks that the result is not nil.
- */
-func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("agenr-n", n)
- }
-
- nl := n.Left
- nr := n.Right
-
- switch n.Op {
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- var n1 gc.Node
- igen(n, &n1, res)
- regalloc(a, gc.Types[gc.Tptr], &n1)
- agen(&n1, a)
- regfree(&n1)
-
- case gc.OIND:
- cgenr(n.Left, a, res)
- gc.Cgen_checknil(a)
-
- case gc.OINDEX:
- p2 := (*obj.Prog)(nil) // to be patched to panicindex.
- w := uint32(n.Type.Width)
-
- //bounded = debug['B'] || n->bounded;
- var n3 gc.Node
- var n1 gc.Node
- if nr.Addable != 0 {
- var tmp gc.Node
- if !gc.Isconst(nr, gc.CTINT) {
- gc.Tempname(&tmp, gc.Types[gc.TINT64])
- }
- if !gc.Isconst(nl, gc.CTSTR) {
- agenr(nl, &n3, res)
- }
- if !gc.Isconst(nr, gc.CTINT) {
- cgen(nr, &tmp)
- regalloc(&n1, tmp.Type, nil)
- gmove(&tmp, &n1)
- }
- } else if nl.Addable != 0 {
- if !gc.Isconst(nr, gc.CTINT) {
- var tmp gc.Node
- gc.Tempname(&tmp, gc.Types[gc.TINT64])
- cgen(nr, &tmp)
- regalloc(&n1, tmp.Type, nil)
- gmove(&tmp, &n1)
- }
-
- if !gc.Isconst(nl, gc.CTSTR) {
- agenr(nl, &n3, res)
- }
- } else {
- var tmp gc.Node
- gc.Tempname(&tmp, gc.Types[gc.TINT64])
- cgen(nr, &tmp)
- nr = &tmp
- if !gc.Isconst(nl, gc.CTSTR) {
- agenr(nl, &n3, res)
- }
- regalloc(&n1, tmp.Type, nil)
- gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
- }
-
- // &a is in &n3 (allocated in res)
- // i is in &n1 (if not constant)
- // w is width
-
- // constant index
- if gc.Isconst(nr, gc.CTINT) {
- if gc.Isconst(nl, gc.CTSTR) {
- gc.Fatal("constant string constant index")
- }
- v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
- if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- if gc.Debug['B'] == 0 && !n.Bounded {
- n1 = n3
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_nel)
- var n4 gc.Node
- regalloc(&n4, n1.Type, nil)
- gmove(&n1, &n4)
- ginscon2(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n4, int64(v))
- regfree(&n4)
- p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT64]), nil, +1)
- ginscall(gc.Panicindex, 0)
- gc.Patch(p1, gc.Pc)
- }
-
- n1 = n3
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_array)
- gmove(&n1, &n3)
- }
-
- if v*uint64(w) != 0 {
- ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*uint64(w)), &n3)
- }
-
- *a = n3
- break
- }
-
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.TINT64], &n1) // i
- gmove(&n1, &n2)
- regfree(&n1)
-
- var n4 gc.Node
- if gc.Debug['B'] == 0 && !n.Bounded {
- // check bounds
- if gc.Isconst(nl, gc.CTSTR) {
- gc.Nodconst(&n4, gc.Types[gc.TUINT64], int64(len(nl.Val.U.Sval)))
- } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- n1 = n3
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_nel)
- regalloc(&n4, gc.Types[gc.TUINT64], nil)
- gmove(&n1, &n4)
- } else {
- if nl.Type.Bound < (1<<15)-1 {
- gc.Nodconst(&n4, gc.Types[gc.TUINT64], nl.Type.Bound)
- } else {
- regalloc(&n4, gc.Types[gc.TUINT64], nil)
- p1 := gins(arm64.AMOVD, nil, &n4)
- p1.From.Type = obj.TYPE_CONST
- p1.From.Offset = nl.Type.Bound
- }
- }
-
- gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n2, &n4)
- if n4.Op == gc.OREGISTER {
- regfree(&n4)
- }
- p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
- if p2 != nil {
- gc.Patch(p2, gc.Pc)
- }
- ginscall(gc.Panicindex, 0)
- gc.Patch(p1, gc.Pc)
- }
-
- if gc.Isconst(nl, gc.CTSTR) {
- regalloc(&n3, gc.Types[gc.Tptr], res)
- p1 := gins(arm64.AMOVD, nil, &n3)
- gc.Datastring(nl.Val.U.Sval, &p1.From)
- p1.From.Type = obj.TYPE_ADDR
- } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- n1 = n3
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_array)
- gmove(&n1, &n3)
- }
-
- if w == 0 {
- } else // nothing to do
- if w == 1 {
- /* w already scaled */
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
- /* else if(w == 2 || w == 4 || w == 8) {
- // TODO(minux): scale using shift
- } */
- } else {
- regalloc(&n4, gc.Types[gc.TUINT64], nil)
- gc.Nodconst(&n1, gc.Types[gc.TUINT64], int64(w))
- gmove(&n1, &n4)
- gins(optoas(gc.OMUL, gc.Types[gc.TUINT64]), &n4, &n2)
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
- regfree(&n4)
- }
-
- *a = n3
- regfree(&n2)
-
- default:
- regalloc(a, gc.Types[gc.Tptr], res)
- agen(n, a)
- }
-}
-
-func ginsadd(as int, off int64, dst *gc.Node) {
- var n1 gc.Node
-
- regalloc(&n1, gc.Types[gc.Tptr], dst)
- gmove(dst, &n1)
- ginscon(as, off, &n1)
- gmove(&n1, dst)
- regfree(&n1)
-}
-
-/*
- * generate:
- * res = &n;
- * The generated code checks that the result is not nil.
- */
-func agen(n *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nagen-res", res)
- gc.Dump("agen-r", n)
- }
-
- if n == nil || n.Type == nil {
- return
- }
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- }
-
- if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
- // Use of a nil interface or nil slice.
- // Create a temporary we can take the address of and read.
- // The generated code is just going to panic, so it need not
- // be terribly efficient. See issue 3670.
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
-
- gc.Gvardef(&n1)
- clearfat(&n1)
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], res)
- var n3 gc.Node
- n3.Op = gc.OADDR
- n3.Left = &n1
- gins(arm64.AMOVD, &n3, &n2)
- gmove(&n2, res)
- regfree(&n2)
- return
- }
-
- if n.Addable != 0 {
- var n1 gc.Node
- n1.Op = gc.OADDR
- n1.Left = n
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], res)
- gins(arm64.AMOVD, &n1, &n2)
- gmove(&n2, res)
- regfree(&n2)
- return
- }
-
- nl := n.Left
-
- switch n.Op {
- default:
- gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
-
- // TODO(minux): 5g has this: Release res so that it is available for cgen_call.
- // Pick it up again after the call for OCALLMETH and OCALLFUNC.
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
-
- cgen_aret(n, res)
-
- case gc.OCALLINTER:
- cgen_callinter(n, res, 0)
- cgen_aret(n, res)
-
- case gc.OCALLFUNC:
- cgen_call(n, 0)
- cgen_aret(n, res)
-
- case gc.OSLICE,
- gc.OSLICEARR,
- gc.OSLICESTR,
- gc.OSLICE3,
- gc.OSLICE3ARR:
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_slice(n, &n1)
- agen(&n1, res)
-
- case gc.OEFACE:
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_eface(n, &n1)
- agen(&n1, res)
-
- case gc.OINDEX:
- var n1 gc.Node
- agenr(n, &n1, res)
- gmove(&n1, res)
- regfree(&n1)
-
- // should only get here with names in this func.
- case gc.ONAME:
- if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
- gc.Dump("bad agen", n)
- gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
- }
-
- // should only get here for heap vars or paramref
- if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
- gc.Dump("bad agen", n)
- gc.Fatal("agen: bad ONAME class %#x", n.Class)
- }
-
- cgen(n.Heapaddr, res)
- if n.Xoffset != 0 {
- ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
- }
-
- case gc.OIND:
- cgen(nl, res)
- gc.Cgen_checknil(res)
-
- case gc.ODOT:
- agen(nl, res)
- if n.Xoffset != 0 {
- ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
- }
-
- case gc.ODOTPTR:
- cgen(nl, res)
- gc.Cgen_checknil(res)
- if n.Xoffset != 0 {
- ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
- }
- }
-}
-
-/*
- * generate:
- * newreg = &n;
- * res = newreg
- *
- * on exit, a has been changed to be *newreg.
- * caller must regfree(a).
- * The generated code checks that the result is not *nil.
- */
-func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nigen-n", n)
- }
-
- switch n.Op {
- case gc.ONAME:
- if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
- break
- }
- *a = *n
- return
-
- // Increase the refcount of the register so that igen's caller
- // has to call regfree.
- case gc.OINDREG:
- if n.Val.U.Reg != arm64.REGSP {
- reg[n.Val.U.Reg]++
- }
- *a = *n
- return
-
- case gc.ODOT:
- igen(n.Left, a, res)
- a.Xoffset += n.Xoffset
- a.Type = n.Type
- fixlargeoffset(a)
- return
-
- case gc.ODOTPTR:
- cgenr(n.Left, a, res)
- gc.Cgen_checknil(a)
- a.Op = gc.OINDREG
- a.Xoffset += n.Xoffset
- a.Type = n.Type
- fixlargeoffset(a)
- return
-
- case gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- switch n.Op {
- case gc.OCALLFUNC:
- cgen_call(n, 0)
-
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
-
- case gc.OCALLINTER:
- cgen_callinter(n, nil, 0)
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
- *a = gc.Node{}
- a.Op = gc.OINDREG
- a.Val.U.Reg = arm64.REGSP
- a.Addable = 1
- a.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
- a.Type = n.Type
- return
-
- // Index of fixed-size array by constant can
- // put the offset in the addressing.
- // Could do the same for slice except that we need
- // to use the real index for the bounds checking.
- case gc.OINDEX:
- if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] && gc.Isfixedarray(n.Left.Left.Type)) {
- if gc.Isconst(n.Right, gc.CTINT) {
- // Compute &a.
- if !gc.Isptr[n.Left.Type.Etype] {
- igen(n.Left, a, res)
- } else {
- var n1 gc.Node
- igen(n.Left, &n1, res)
- gc.Cgen_checknil(&n1)
- regalloc(a, gc.Types[gc.Tptr], res)
- gmove(&n1, a)
- regfree(&n1)
- a.Op = gc.OINDREG
- }
-
- // Compute &a[i] as &a + i*width.
- a.Type = n.Type
-
- a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
- fixlargeoffset(a)
- return
- }
- }
- }
-
- agenr(n, a, res)
- a.Op = gc.OINDREG
- a.Type = n.Type
-}
-
-/*
- * generate:
- * if(n == true) goto to;
- */
-func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nbgen", n)
- }
-
- if n == nil {
- n = gc.Nodbool(true)
- }
-
- if n.Ninit != nil {
- gc.Genlist(n.Ninit)
- }
-
- if n.Type == nil {
- gc.Convlit(&n, gc.Types[gc.TBOOL])
- if n.Type == nil {
- return
- }
- }
-
- et := int(n.Type.Etype)
- if et != gc.TBOOL {
- gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
- gc.Patch(gins(obj.AEND, nil, nil), to)
- return
- }
-
- var nr *gc.Node
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- if n.Ninit != nil {
- gc.Genlist(n.Ninit)
- }
- }
-
- var nl *gc.Node
- switch n.Op {
- default:
- var n1 gc.Node
- regalloc(&n1, n.Type, nil)
- cgen(n, &n1)
- var n2 gc.Node
- gc.Nodconst(&n2, n.Type, 0)
- gcmp(optoas(gc.OCMP, n.Type), &n1, &n2)
- a := arm64.ABNE
- if !true_ {
- a = arm64.ABEQ
- }
- gc.Patch(gc.Gbranch(a, n.Type, likely), to)
- regfree(&n1)
- return
-
- // need to ask if it is bool?
- case gc.OLITERAL:
- if !true_ == (n.Val.U.Bval == 0) {
- gc.Patch(gc.Gbranch(arm64.AB, nil, likely), to)
- }
- return
-
- case gc.OANDAND,
- gc.OOROR:
- if (n.Op == gc.OANDAND) == true_ {
- p1 := gc.Gbranch(obj.AJMP, nil, 0)
- p2 := gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, gc.Pc)
- bgen(n.Left, !true_, -likely, p2)
- bgen(n.Right, !true_, -likely, p2)
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, to)
- gc.Patch(p2, gc.Pc)
- } else {
- bgen(n.Left, true_, likely, to)
- bgen(n.Right, true_, likely, to)
- }
-
- return
-
- case gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OGT,
- gc.OLE,
- gc.OGE:
- nr = n.Right
- if nr == nil || nr.Type == nil {
- return
- }
- fallthrough
-
- case gc.ONOT: // unary
- nl = n.Left
-
- if nl == nil || nl.Type == nil {
- return
- }
- }
-
- switch n.Op {
- case gc.ONOT:
- bgen(nl, !true_, likely, to)
- return
-
- case gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OGT,
- gc.OLE,
- gc.OGE:
- a := int(n.Op)
- if !true_ {
- if gc.Isfloat[nr.Type.Etype] {
- // brcom is not valid on floats when NaN is involved.
- p1 := gc.Gbranch(arm64.AB, nil, 0)
-
- p2 := gc.Gbranch(arm64.AB, nil, 0)
- gc.Patch(p1, gc.Pc)
- ll := n.Ninit // avoid re-genning ninit
- n.Ninit = nil
- bgen(n, true, -likely, p2)
- n.Ninit = ll
- gc.Patch(gc.Gbranch(arm64.AB, nil, 0), to)
- gc.Patch(p2, gc.Pc)
- return
- }
-
- a = gc.Brcom(a)
- true_ = !true_
- }
-
- // make simplest on right
- if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
- a = gc.Brrev(a)
- r := nl
- nl = nr
- nr = r
- }
-
- if gc.Isslice(nl.Type) {
- // front end should only leave cmp to literal nil
- if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
- gc.Yyerror("illegal slice comparison")
- break
- }
-
- a = optoas(a, gc.Types[gc.Tptr])
- var n1 gc.Node
- igen(nl, &n1, nil)
- n1.Xoffset += int64(gc.Array_array)
- n1.Type = gc.Types[gc.Tptr]
- var tmp gc.Node
- gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], &n1)
- gmove(&n1, &n2)
- gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
- regfree(&n2)
- gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
- regfree(&n1)
- break
- }
-
- if gc.Isinter(nl.Type) {
- // front end should only leave cmp to literal nil
- if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
- gc.Yyerror("illegal interface comparison")
- break
- }
-
- a = optoas(a, gc.Types[gc.Tptr])
- var n1 gc.Node
- igen(nl, &n1, nil)
- n1.Type = gc.Types[gc.Tptr]
- var tmp gc.Node
- gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], &n1)
- gmove(&n1, &n2)
- gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
- regfree(&n2)
- gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
- regfree(&n1)
- break
- }
-
- if gc.Iscomplex[nl.Type.Etype] {
- gc.Complexbool(a, nl, nr, true_, likely, to)
- break
- }
-
- var n1 gc.Node
- var n2 gc.Node
- if nr.Ullman >= gc.UINF {
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
-
- var tmp gc.Node
- gc.Tempname(&tmp, nl.Type)
- gmove(&n1, &tmp)
- regfree(&n1)
-
- regalloc(&n2, nr.Type, nil)
- cgen(nr, &n2)
-
- regalloc(&n1, nl.Type, nil)
- cgen(&tmp, &n1)
-
- goto cmp
- }
-
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
- regalloc(&n2, nr.Type, nil)
- cgen(nr, &n2)
-
- cmp:
- l := &n1
- r := &n2
- gcmp(optoas(gc.OCMP, nr.Type), l, r)
- if gc.Isfloat[nr.Type.Etype] && (a == gc.OLE || a == gc.OGE) {
- // To get NaN right, must rewrite x <= y into separate x < y or x = y.
- switch a {
- case gc.OLE:
- a = gc.OLT
-
- case gc.OGE:
- a = gc.OGT
- }
-
- gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
- gc.Patch(gc.Gbranch(optoas(gc.OEQ, nr.Type), nr.Type, likely), to)
- } else {
- gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
- }
-
- regfree(&n1)
- regfree(&n2)
- }
-
- return
-}
-
-/*
- * n is on stack, either local variable
- * or return value from function call.
- * return n's offset from SP.
- */
-func stkof(n *gc.Node) int64 {
- switch n.Op {
- case gc.OINDREG:
- return n.Xoffset
-
- case gc.ODOT:
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- break
- }
- off := stkof(n.Left)
- if off == -1000 || off == 1000 {
- return off
- }
- return off + n.Xoffset
-
- case gc.OINDEX:
- t := n.Left.Type
- if !gc.Isfixedarray(t) {
- break
- }
- off := stkof(n.Left)
- if off == -1000 || off == 1000 {
- return off
- }
- if gc.Isconst(n.Right, gc.CTINT) {
- return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
- }
- return 1000
-
- case gc.OCALLMETH,
- gc.OCALLINTER,
- gc.OCALLFUNC:
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- t = t.Type
- }
-
- var flist gc.Iter
- t = gc.Structfirst(&flist, gc.Getoutarg(t))
- if t != nil {
- return t.Width + int64(gc.Widthptr) // +widthptr: correct for saved LR
- }
- }
-
- // botch - probably failing to recognize address
- // arithmetic on the above. eg INDEX and DOT
- return -1000
-}
-
-/*
- * block copy:
- * memmove(&ns, &n, w);
- */
-func sgen(n *gc.Node, ns *gc.Node, w int64) {
- var res *gc.Node = ns
-
- if gc.Debug['g'] != 0 {
- fmt.Printf("\nsgen w=%d\n", w)
- gc.Dump("r", n)
- gc.Dump("res", ns)
- }
-
- if n.Ullman >= gc.UINF && ns.Ullman >= gc.UINF {
- gc.Fatal("sgen UINF")
- }
-
- if w < 0 {
- gc.Fatal("sgen copy %d", w)
- }
-
- // If copying .args, that's all the results, so record definition sites
- // for them for the liveness analysis.
- if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
- for l := gc.Curfn.Dcl; l != nil; l = l.Next {
- if l.N.Class == gc.PPARAMOUT {
- gc.Gvardef(l.N)
- }
- }
- }
-
- // Avoid taking the address for simple enough types.
- //if gc.Componentgen(n, ns) {
- // return
- //}
-
- if w == 0 {
- // evaluate side effects only.
- var dst gc.Node
- regalloc(&dst, gc.Types[gc.Tptr], nil)
-
- agen(res, &dst)
- agen(n, &dst)
- regfree(&dst)
- return
- }
-
+func stackcopy(n, res *gc.Node, osrc, odst, w int64) {
// determine alignment.
// want to avoid unaligned access, so have to use
// smaller operations for less aligned types.
}
c := int32(w / int64(align))
- // offset on the stack
- osrc := int32(stkof(n))
-
- odst := int32(stkof(res))
- if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
- // osrc and odst both on stack, and at least one is in
- // an unknown position. Could generate code to test
- // for forward/backward copy, but instead just copy
- // to a temporary location first.
- var tmp gc.Node
- gc.Tempname(&tmp, n.Type)
-
- sgen(n, &tmp, w)
- sgen(&tmp, res, w)
- return
- }
-
- if osrc%int32(align) != 0 || odst%int32(align) != 0 {
+ if osrc%int64(align) != 0 || odst%int64(align) != 0 {
gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
}
var dst gc.Node
var src gc.Node
if n.Ullman >= res.Ullman {
- agenr(n, &dst, res) // temporarily use dst
- regalloc(&src, gc.Types[gc.Tptr], nil)
+ gc.Agenr(n, &dst, res) // temporarily use dst
+ gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
gins(arm64.AMOVD, &dst, &src)
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
- agen(res, &dst)
+ gc.Agen(res, &dst)
} else {
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
- agenr(res, &dst, res)
- agenr(n, &src, nil)
+ gc.Agenr(res, &dst, res)
+ gc.Agenr(n, &src, nil)
}
var tmp gc.Node
- regalloc(&tmp, gc.Types[gc.Tptr], nil)
+ gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
// set up end marker
var nend gc.Node
// move src and dest to the end of block if necessary
if dir < 0 {
if c >= 4 {
- regalloc(&nend, gc.Types[gc.Tptr], nil)
+ gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
gins(arm64.AMOVD, &src, &nend)
}
p.From.Offset = int64(-dir)
if c >= 4 {
- regalloc(&nend, gc.Types[gc.Tptr], nil)
+ gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
p := gins(arm64.AMOVD, &src, &nend)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = w
p = gcmp(arm64.ACMP, &src, &nend)
gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), ploop)
- regfree(&nend)
+ gc.Regfree(&nend)
} else {
// TODO(austin): Instead of generating ADD $-8,R8; ADD
// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
}
}
- regfree(&dst)
- regfree(&src)
- regfree(&tmp)
+ gc.Regfree(&dst)
+ gc.Regfree(&src)
+ gc.Regfree(&tmp)
}
gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = arm64.REGSP
gc.Thearch.REGCTXT = arm64.REGCTXT
+ gc.Thearch.REGCALLX = arm64.REGRT1
+ gc.Thearch.REGCALLX2 = arm64.REGRT2
+ gc.Thearch.REGRETURN = arm64.REG_R0
+ gc.Thearch.REGMIN = arm64.REG_R0
+ gc.Thearch.REGMAX = arm64.REG_R31
+ gc.Thearch.FREGMIN = arm64.REG_F0
+ gc.Thearch.FREGMAX = arm64.REG_F31
gc.Thearch.MAXWIDTH = MAXWIDTH
- gc.Thearch.Anyregalloc = anyregalloc
+ gc.Thearch.ReservedRegs = resvd
+
gc.Thearch.Betypeinit = betypeinit
- gc.Thearch.Bgen = bgen
- gc.Thearch.Cgen = cgen
- gc.Thearch.Cgen_call = cgen_call
- gc.Thearch.Cgen_callinter = cgen_callinter
- gc.Thearch.Cgen_ret = cgen_ret
+ gc.Thearch.Cgen_hmul = cgen_hmul
+ gc.Thearch.Cgen_shift = cgen_shift
gc.Thearch.Clearfat = clearfat
gc.Thearch.Defframe = defframe
+ gc.Thearch.Dodiv = dodiv
gc.Thearch.Excise = excise
gc.Thearch.Expandchecks = expandchecks
- gc.Thearch.Gclean = gclean
- gc.Thearch.Ginit = ginit
gc.Thearch.Gins = gins
- gc.Thearch.Ginscall = ginscall
+ gc.Thearch.Ginscon = ginscon
+ gc.Thearch.Ginsnop = ginsnop
gc.Thearch.Gmove = gmove
- gc.Thearch.Igen = igen
gc.Thearch.Linkarchinit = linkarchinit
gc.Thearch.Peep = peep
gc.Thearch.Proginfo = proginfo
- gc.Thearch.Regalloc = regalloc
- gc.Thearch.Regfree = regfree
gc.Thearch.Regtyp = regtyp
gc.Thearch.Sameaddr = sameaddr
gc.Thearch.Smallindir = smallindir
gc.Thearch.Stackaddr = stackaddr
+ gc.Thearch.Stackcopy = stackcopy
+ gc.Thearch.Sudoaddable = sudoaddable
+ gc.Thearch.Sudoclean = sudoclean
gc.Thearch.Excludedregs = excludedregs
gc.Thearch.RtoB = RtoB
gc.Thearch.FtoB = RtoB
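// Editor's note: this hook table is the point of the whole diff. The
// portable gc package now owns cgen, bgen, agen, igen, regalloc and
// the call-generation code (hence the large deletions above), and
// each architecture registers only small leaf hooks such as
// Cgen_hmul, Cgen_shift, Dodiv, Stackcopy, Sudoaddable and Sudoclean,
// plus a description of its register file (REGMIN/REGMAX,
// FREGMIN/FREGMAX, ReservedRegs).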
return q
}
-/*
- * generate:
- * call f
- * proc=-1 normal call but no return
- * proc=0 normal call
- * proc=1 goroutine run in new proc
- * proc=2 defer call save away stack
- * proc=3 normal call to C pointer (not Go func value)
-*/
-func ginscall(f *gc.Node, proc int) {
- if f.Type != nil {
- extra := int32(0)
- if proc == 1 || proc == 2 {
- extra = 2 * int32(gc.Widthptr)
- }
- gc.Setmaxarg(f.Type, extra)
- }
-
- switch proc {
- default:
- gc.Fatal("ginscall: bad proc %d", proc)
-
- case 0, // normal call
- -1: // normal call but no return
- if f.Op == gc.ONAME && f.Class == gc.PFUNC {
- if f == gc.Deferreturn {
- // Deferred calls will appear to be returning to
- // the CALL deferreturn(SB) that we are about to emit.
- // However, the stack trace code will show the line
- // of the instruction byte before the return PC.
- // To avoid that being an unrelated instruction,
- // insert an arm64 NOP so that we will have the right line number.
- // The arm64 NOP is really HINT $0; use that description
- // because the NOP pseudo-instruction would be removed by
- // the linker.
- var con gc.Node
- gc.Nodconst(&con, gc.Types[gc.TINT], 0)
- gins(arm64.AHINT, &con, nil)
- }
-
- p := gins(arm64.ABL, nil, f)
- gc.Afunclit(&p.To, f)
- if proc == -1 || gc.Noreturn(p) {
- gins(obj.AUNDEF, nil, nil)
- }
- break
- }
-
- var reg gc.Node
- gc.Nodreg(&reg, gc.Types[gc.Tptr], arm64.REGCTXT)
- var r1 gc.Node
- gc.Nodreg(&r1, gc.Types[gc.Tptr], arm64.REGRT1)
- gmove(f, &reg)
- reg.Op = gc.OINDREG
- gmove(&reg, &r1)
- r1.Op = gc.OINDREG
- gins(arm64.ABL, nil, &r1)
-
- case 3: // normal call of c function pointer
- gins(arm64.ABL, nil, f)
-
- case 1, // call in new proc (go)
- 2: // deferred call (defer)
- var con gc.Node
- gc.Nodconst(&con, gc.Types[gc.TINT64], int64(gc.Argsize(f.Type)))
-
- var reg gc.Node
- gc.Nodreg(&reg, gc.Types[gc.TINT64], arm64.REGRT1)
- var reg2 gc.Node
- gc.Nodreg(&reg2, gc.Types[gc.TINT64], arm64.REGRT2)
- gmove(f, &reg)
-
- gmove(&con, &reg2)
- p := gins(arm64.AMOVW, &reg2, nil)
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = arm64.REGSP
- p.To.Offset = 8
-
- p = gins(arm64.AMOVD, &reg, nil)
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = arm64.REGSP
- p.To.Offset = 16
-
- if proc == 1 {
- ginscall(gc.Newproc, 0)
- } else {
- if gc.Hasdefer == 0 {
- gc.Fatal("hasdefer=0 but has defer")
- }
- ginscall(gc.Deferproc, 0)
- }
-
- if proc == 2 {
- gc.Nodreg(&reg, gc.Types[gc.TINT64], arm64.REG_R0) // R0 should match runtime.return0
- p := gins(arm64.ACMP, &reg, nil)
- p.Reg = arm64.REGZERO
- p = gc.Gbranch(arm64.ABEQ, nil, +1)
- cgen_ret(nil)
- gc.Patch(p, gc.Pc)
- }
- }
-}
-
-/*
- * n is call to interface method.
- * generate res = n.
- */
-func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
- i := n.Left
- if i.Op != gc.ODOTINTER {
- gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
- }
-
- f := i.Right // field
- if f.Op != gc.ONAME {
- gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
- }
-
- i = i.Left // interface
-
- if i.Addable == 0 {
- var tmpi gc.Node
- gc.Tempname(&tmpi, i.Type)
- cgen(i, &tmpi)
- i = &tmpi
- }
-
- gc.Genlist(n.List) // assign the args
-
- // i is now addable, prepare an indirected
- // register to hold its address.
- var nodi gc.Node
- igen(i, &nodi, res) // REG = &inter
-
- var nodsp gc.Node
- gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], arm64.REGSP)
-
- nodsp.Xoffset = int64(gc.Widthptr)
- if proc != 0 {
- nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
- }
- nodi.Type = gc.Types[gc.Tptr]
- nodi.Xoffset += int64(gc.Widthptr)
- cgen(&nodi, &nodsp) // {8 or 24}(SP) = 8(REG) -- i.data
-
- var nodo gc.Node
- regalloc(&nodo, gc.Types[gc.Tptr], res)
-
- nodi.Type = gc.Types[gc.Tptr]
- nodi.Xoffset -= int64(gc.Widthptr)
- cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
- regfree(&nodi)
-
- var nodr gc.Node
- regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
- if n.Left.Xoffset == gc.BADWIDTH {
- gc.Fatal("cgen_callinter: badwidth")
- }
- gc.Cgen_checknil(&nodo) // in case offset is huge
- nodo.Op = gc.OINDREG
- nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
- if proc == 0 {
- // plain call: use direct c function pointer - more efficient
- cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
- proc = 3
- } else {
- // go/defer. generate go func value.
- p := gins(arm64.AMOVD, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
- p.From.Type = obj.TYPE_ADDR
- }
-
- nodr.Type = n.Left.Type
- ginscall(&nodr, proc)
-
- regfree(&nodr)
- regfree(&nodo)
-}
-
-/*
- * generate function call;
- * proc=0 normal call
- * proc=1 goroutine run in new proc
- * proc=2 defer call save away stack
- */
-func cgen_call(n *gc.Node, proc int) {
- if n == nil {
- return
- }
-
- var afun gc.Node
- if n.Left.Ullman >= gc.UINF {
- // if name involves a fn call
- // precompute the address of the fn
- gc.Tempname(&afun, gc.Types[gc.Tptr])
-
- cgen(n.Left, &afun)
- }
-
- gc.Genlist(n.List) // assign the args
- t := n.Left.Type
-
- // call tempname pointer
- if n.Left.Ullman >= gc.UINF {
- var nod gc.Node
- regalloc(&nod, gc.Types[gc.Tptr], nil)
- gc.Cgen_as(&nod, &afun)
- nod.Type = t
- ginscall(&nod, proc)
- regfree(&nod)
- return
- }
-
- // call pointer
- if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
- var nod gc.Node
- regalloc(&nod, gc.Types[gc.Tptr], nil)
- gc.Cgen_as(&nod, n.Left)
- nod.Type = t
- ginscall(&nod, proc)
- regfree(&nod)
- return
- }
-
- // call direct
- n.Left.Method = 1
-
- ginscall(n.Left, proc)
-}
-
-/*
- * call to n has already been generated.
- * generate:
- * res = return value from call.
- */
-func cgen_callret(n *gc.Node, res *gc.Node) {
- t := n.Left.Type
- if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
- t = t.Type
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(t))
- if fp == nil {
- gc.Fatal("cgen_callret: nil")
- }
-
- var nod gc.Node
- nod.Op = gc.OINDREG
- nod.Val.U.Reg = arm64.REGSP
- nod.Addable = 1
-
- nod.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved LR at 0(R1)
- nod.Type = fp.Type
- gc.Cgen_as(res, &nod)
-}
-
-/*
- * call to n has already been generated.
- * generate:
- * res = &return value from call.
- */
-func cgen_aret(n *gc.Node, res *gc.Node) {
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- t = t.Type
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(t))
- if fp == nil {
- gc.Fatal("cgen_aret: nil")
- }
-
- var nod1 gc.Node
- nod1.Op = gc.OINDREG
- nod1.Val.U.Reg = arm64.REGSP
- nod1.Addable = 1
-
- nod1.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
- nod1.Type = fp.Type
-
- if res.Op != gc.OREGISTER {
- var nod2 gc.Node
- regalloc(&nod2, gc.Types[gc.Tptr], res)
- agen(&nod1, &nod2)
- gins(arm64.AMOVD, &nod2, res)
- regfree(&nod2)
- } else {
- agen(&nod1, res)
- }
-}
-
-/*
- * generate return.
- * n->left is assignments to return values.
- */
-func cgen_ret(n *gc.Node) {
- if n != nil {
- gc.Genlist(n.List) // copy out args
- }
- if gc.Hasdefer != 0 {
- ginscall(gc.Deferreturn, 0)
- }
- gc.Genlist(gc.Curfn.Exit)
- p := gins(obj.ARET, nil, nil)
- if n != nil && n.Op == gc.ORETJMP {
- p.To.Name = obj.NAME_EXTERN
- p.To.Type = obj.TYPE_ADDR
- p.To.Sym = gc.Linksym(n.Left.Sym)
- }
+func ginsnop() {
+ var con gc.Node
+ gc.Nodconst(&con, gc.Types[gc.TINT], 0)
+ gins(arm64.AHINT, &con, nil)
}
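// Editor's note: on ARMv8 the NOP mnemonic is an alias for HINT #0,
// so HINT $0 is a genuine architectural no-op that survives into the
// instruction stream, unlike the obj.ANOP pseudo-instruction, which
// never reaches the instruction stream.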
/*
a := optoas(gc.ODIV, t)
var tl gc.Node
- regalloc(&tl, t0, nil)
+ gc.Regalloc(&tl, t0, nil)
var tr gc.Node
- regalloc(&tr, t0, nil)
+ gc.Regalloc(&tr, t0, nil)
if nl.Ullman >= nr.Ullman {
- cgen(nl, &tl)
- cgen(nr, &tr)
+ gc.Cgen(nl, &tl)
+ gc.Cgen(nr, &tr)
} else {
- cgen(nr, &tr)
- cgen(nl, &tl)
+ gc.Cgen(nr, &tr)
+ gc.Cgen(nl, &tl)
}
if t != t0 {
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
- ginscall(panicdiv, -1)
+ gc.Ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
var p2 *obj.Prog
p1 = gins(a, &tr, &tl)
if op == gc.ODIV {
- regfree(&tr)
+ gc.Regfree(&tr)
gmove(&tl, res)
} else {
// A%B = A-(A/B*B)
var tm gc.Node
- regalloc(&tm, t, nil)
+ gc.Regalloc(&tm, t, nil)
// patch div to use the 3 register form
// TODO(minux): add gins3?
p1.To.Reg = tm.Val.U.Reg
gins(optoas(gc.OMUL, t), &tr, &tm)
- regfree(&tr)
+ gc.Regfree(&tr)
gins(optoas(gc.OSUB, t), &tm, &tl)
- regfree(&tm)
+ gc.Regfree(&tm)
gmove(&tl, res)
}
- regfree(&tl)
+ gc.Regfree(&tl)
if check != 0 {
gc.Patch(p2, gc.Pc)
}
}
-/*
- * generate division according to op, one of:
- * res = nl / nr
- * res = nl % nr
- */
-func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- // TODO(minux): enable division by magic multiply (also need to fix longmod below)
- //if(nr->op != OLITERAL)
- // division and mod using (slow) hardware instruction
- dodiv(op, nl, nr, res)
-
- return
-}
-
/*
* generate high multiply:
* res = (nl*nr) >> width
t := (*gc.Type)(nl.Type)
w := int(int(t.Width * 8))
var n1 gc.Node
- cgenr(nl, &n1, res)
+ gc.Cgenr(nl, &n1, res)
var n2 gc.Node
- cgenr(nr, &n2, nil)
+ gc.Cgenr(nr, &n2, nil)
switch gc.Simtype[t.Etype] {
case gc.TINT8,
gc.TINT16,
gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
}
- cgen(&n1, res)
- regfree(&n1)
- regfree(&n2)
+ gc.Cgen(&n1, res)
+ gc.Regfree(&n1)
+ gc.Regfree(&n2)
}
/*
if nr.Op == gc.OLITERAL {
var n1 gc.Node
- regalloc(&n1, nl.Type, res)
- cgen(nl, &n1)
+ gc.Regalloc(&n1, nl.Type, res)
+ gc.Cgen(nl, &n1)
sc := uint64(uint64(gc.Mpgetfix(nr.Val.U.Xval)))
if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1
gins(a, nr, &n1)
}
gmove(&n1, res)
- regfree(&n1)
+ gc.Regfree(&n1)
return
}
if nl.Ullman >= gc.UINF {
var n4 gc.Node
gc.Tempname(&n4, nl.Type)
- cgen(nl, &n4)
+ gc.Cgen(nl, &n4)
nl = &n4
}
if nr.Ullman >= gc.UINF {
var n5 gc.Node
gc.Tempname(&n5, nr.Type)
- cgen(nr, &n5)
+ gc.Cgen(nr, &n5)
nr = &n5
}
}
var n1 gc.Node
- regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
+ gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
var n3 gc.Node
- regalloc(&n3, tcount, &n1) // to clear high bits of CX
+ gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
var n2 gc.Node
- regalloc(&n2, nl.Type, res)
+ gc.Regalloc(&n2, nl.Type, res)
if nl.Ullman >= nr.Ullman {
- cgen(nl, &n2)
- cgen(nr, &n1)
+ gc.Cgen(nl, &n2)
+ gc.Cgen(nr, &n1)
gmove(&n1, &n3)
} else {
- cgen(nr, &n1)
+ gc.Cgen(nr, &n1)
gmove(&n1, &n3)
- cgen(nl, &n2)
+ gc.Cgen(nl, &n2)
}
- regfree(&n3)
+ gc.Regfree(&n3)
// test and fix up large shifts
if !bounded {
gmove(&n2, res)
- regfree(&n1)
- regfree(&n2)
+ gc.Regfree(&n1)
+ gc.Regfree(&n2)
}
func clearfat(nl *gc.Node) {
var dst gc.Node
gc.Nodreg(&dst, gc.Types[gc.Tptr], arm64.REGRT1)
reg[arm64.REGRT1-arm64.REG_R0]++
- agen(nl, &dst)
+ gc.Agen(nl, &dst)
var boff uint64
if q > 128 {
p.From.Offset = 8
var end gc.Node
- regalloc(&end, gc.Types[gc.Tptr], nil)
+ gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
p = gins(arm64.AMOVD, &dst, &end)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(q * 8)
p = gcmp(arm64.ACMP, &dst, &end)
gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), pl)
- regfree(&end)
+ gc.Regfree(&end)
// The loop leaves R16 on the last zeroed dword
boff = 8
arm64.FREGTWO,
}
-func ginit() {
- for i := 0; i < len(reg); i++ {
- reg[i] = 1
- }
- for i := 0; i < arm64.NREG+arm64.NFREG; i++ {
- reg[i] = 0
- }
-
- for i := 0; i < len(resvd); i++ {
- reg[resvd[i]-arm64.REG_R0]++
- }
-}
-
-var regpc [len(reg)]uint32
-
-func gclean() {
- for i := int(0); i < len(resvd); i++ {
- reg[resvd[i]-arm64.REG_R0]--
- }
-
- for i := int(0); i < len(reg); i++ {
- if reg[i] != 0 {
- gc.Yyerror("reg %v left allocated, %p\n", obj.Rconv(i+arm64.REG_R0), regpc[i])
- }
- }
-}
-
-func anyregalloc() bool {
- var j int
-
- for i := int(0); i < len(reg); i++ {
- if reg[i] == 0 {
- goto ok
- }
- for j = 0; j < len(resvd); j++ {
- if resvd[j] == i {
- goto ok
- }
- }
- return true
- ok:
- }
-
- return false
-}
-
-/*
- * allocate register of type t, leave in n.
- * if o != N, o is desired fixed register.
- * caller must regfree(n).
- */
-func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
- if t == nil {
- gc.Fatal("regalloc: t nil")
- }
- et := int(int(gc.Simtype[t.Etype]))
-
- if gc.Debug['r'] != 0 {
- fixfree := int(0)
- fltfree := int(0)
- for i := int(arm64.REG_R0); i < arm64.REG_F31; i++ {
- if reg[i-arm64.REG_R0] == 0 {
- if i < arm64.REG_F0 {
- fixfree++
- } else {
- fltfree++
- }
- }
- }
-
- fmt.Printf("regalloc fix %d flt %d free\n", fixfree, fltfree)
- }
-
- var i int
- switch et {
- case gc.TINT8,
- gc.TUINT8,
- gc.TINT16,
- gc.TUINT16,
- gc.TINT32,
- gc.TUINT32,
- gc.TINT64,
- gc.TUINT64,
- gc.TPTR32,
- gc.TPTR64,
- gc.TBOOL:
- if o != nil && o.Op == gc.OREGISTER {
- i = int(o.Val.U.Reg)
- if i >= arm64.REGMIN && i <= arm64.REGMAX {
- goto out
- }
- }
-
- for i = arm64.REGMIN; i <= arm64.REGMAX; i++ {
- if reg[i-arm64.REG_R0] == 0 {
- regpc[i-arm64.REG_R0] = uint32(obj.Getcallerpc(&n))
- goto out
- }
- }
-
- gc.Flusherrors()
- for i := int(arm64.REG_R0); i < arm64.REG_R0+arm64.NREG; i++ {
- fmt.Printf("R%d %p\n", i, regpc[i-arm64.REG_R0])
- }
- gc.Fatal("out of fixed registers")
-
- case gc.TFLOAT32,
- gc.TFLOAT64:
- if o != nil && o.Op == gc.OREGISTER {
- i = int(o.Val.U.Reg)
- if i >= arm64.FREGMIN && i <= arm64.FREGMAX {
- goto out
- }
- }
-
- for i = arm64.FREGMIN; i <= arm64.FREGMAX; i++ {
- if reg[i-arm64.REG_R0] == 0 {
- regpc[i-arm64.REG_R0] = uint32(obj.Getcallerpc(&n))
- goto out
- }
- }
-
- gc.Flusherrors()
- for i := int(arm64.REG_F0); i < arm64.REG_F0+arm64.NREG; i++ {
- fmt.Printf("F%d %p\n", i, regpc[i-arm64.REG_R0])
- }
- gc.Fatal("out of floating registers")
-
- case gc.TCOMPLEX64,
- gc.TCOMPLEX128:
- gc.Tempname(n, t)
- return
- }
-
- gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0))
- return
-
-out:
- reg[i-arm64.REG_R0]++
- gc.Nodreg(n, t, i)
-}
-
-func regfree(n *gc.Node) {
- if n.Op == gc.ONAME {
- return
- }
- if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
- gc.Fatal("regfree: not a register")
- }
- i := int(int(n.Val.U.Reg) - arm64.REG_R0)
- if i == arm64.REGSP-arm64.REG_R0 {
- return
- }
- if i < 0 || i >= len(reg) {
- gc.Fatal("regfree: reg out of range")
- }
- if reg[i] <= 0 {
- gc.Fatal("regfree: reg not allocated")
- }
- reg[i]--
- if reg[i] == 0 {
- regpc[i] = 0
- }
-}
-
/*
* generate
* as $c, n
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
- if as != arm64.AMOVD && (c < -arm64.BIG || c > arm64.BIG) {
+ if as != arm64.AMOVD && (c < -arm64.BIG || c > arm64.BIG) || as == arm64.AMUL || n2 != nil && n2.Op != gc.OREGISTER {
// cannot have more than 16-bit of immediate in ADD, etc.
// instead, MOV into register first.
var ntmp gc.Node
- regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+ gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(arm64.AMOVD, &n1, &ntmp)
gins(as, &ntmp, n2)
- regfree(&ntmp)
+ gc.Regfree(&ntmp)
return
}
- gins(as, &n1, n2)
+ rawgins(as, &n1, n2)
}
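// Editor's note: the widened condition above reflects arm64 encoding
// limits. Arithmetic immediates are only 12 bits wide (the BIG bound
// approximates this), MUL has no immediate form at all, and a
// non-register second operand cannot take an immediate either, so in
// all of these cases the constant is first materialized into a
// scratch register with MOVD.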
/*
// MOV n1 into register first
var ntmp gc.Node
- regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+ gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
- gins(arm64.AMOVD, &n1, &ntmp)
+ rawgins(arm64.AMOVD, &n1, &ntmp)
gcmp(as, n2, &ntmp)
- regfree(&ntmp)
+ gc.Regfree(&ntmp)
}
/*
var con gc.Node
gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val)
var r1 gc.Node
- regalloc(&r1, con.Type, t)
+ gc.Regalloc(&r1, con.Type, t)
gins(arm64.AMOVD, &con, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
case gc.TUINT32,
var con gc.Node
gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val)
var r1 gc.Node
- regalloc(&r1, con.Type, t)
+ gc.Regalloc(&r1, con.Type, t)
gins(arm64.AMOVD, &con, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
}
// requires register destination
rdst:
- regalloc(&r1, t.Type, t)
+ gc.Regalloc(&r1, t.Type, t)
gins(a, f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
// requires register intermediate
hard:
- regalloc(&r1, cvt, t)
+ gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
}
+func intLiteral(n *gc.Node) (x int64, ok bool) {
+ if n == nil || n.Op != gc.OLITERAL {
+ return
+ }
+ switch n.Val.Ctype {
+ case gc.CTINT, gc.CTRUNE:
+ return gc.Mpgetfix(n.Val.U.Xval), true
+ case gc.CTBOOL:
+ return int64(n.Val.U.Bval), true
+ }
+ return
+}
+
+// gins is called by the front end.
+// It synthesizes some multiple-instruction sequences
+// so the front end can stay simpler.
+func gins(as int, f, t *gc.Node) *obj.Prog {
+ if as >= obj.A_ARCHSPECIFIC {
+ if x, ok := intLiteral(f); ok {
+ ginscon(as, x, t)
+ return nil // caller must not use
+ }
+ }
+ if as == arm64.ACMP {
+ if x, ok := intLiteral(t); ok {
+ ginscon2(as, f, x)
+ return nil // caller must not use
+ }
+ }
+ return rawgins(as, f, t)
+}
+
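// Editor's note: a hedged example of what the wrapper buys. A call
// such as gins(arm64.AADD, &con, &n), with con holding $0x12345 and
// n in register R3, is no longer emitted as ADD $0x12345, R3 (which
// may not encode); ginscon expands it to roughly
//
//	MOVD $0x12345, Rtmp
//	ADD  Rtmp, R3
//
// The constant and register here are illustrative only.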
/*
* generate one instruction:
* as f, t
*/
-func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
// TODO(austin): Add self-move test like in 6g (but be careful
// of truncation moves)
gc.Naddr(&p.From, f)
gc.Naddr(&p.To, t)
+ switch as {
+ case arm64.ACMP, arm64.AFCMPS, arm64.AFCMPD:
+ if t != nil {
+ if f.Op != gc.OREGISTER {
+ gc.Fatal("bad operands to gcmp")
+ }
+ p.From = p.To
+ p.To = obj.Addr{}
+ raddr(f, p)
+ }
+ }
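	// Editor's note: the fixup above rewrites gins(ACMP, f, t) into
	// the operand shape the arm64 assembler expects for compares: t
	// moves into p.From, p.To is cleared, and the register f is
	// attached as p.Reg via raddr, matching the CMP $c, Rn and
	// CMP Rm, Rn forms.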
+
+ // Bad things the front end has done to us. Crash to find call stack.
+ switch as {
+ case arm64.AAND, arm64.AMUL:
+ if p.From.Type == obj.TYPE_CONST {
+ gc.Debug['h'] = 1
+ gc.Fatal("bad inst: %v", p)
+ }
+ case arm64.ACMP:
+ if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
+ gc.Debug['h'] = 1
+ gc.Fatal("bad inst: %v", p)
+ }
+ }
+
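	// Editor's note: these checks are defensive. arm64 MUL has no
	// immediate form, AND's logical immediates cover only bitmask
	// patterns, and as a load/store architecture there are no memory
	// operands for CMP; such operands reaching rawgins mean a caller
	// bypassed the gins/ginscon wrappers above.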
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
}
gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
}
- p := gins(as, rhs, nil)
+ p := rawgins(as, rhs, nil)
raddr(lhs, p)
return p
}
"cmd/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/x86"
- "fmt"
)
-/*
- * reg.c
- */
-
-/*
- * peep.c
- */
-func mgen(n *gc.Node, n1 *gc.Node, rg *gc.Node) {
- n1.Op = gc.OEMPTY
-
- if n.Addable != 0 {
- *n1 = *n
- if n1.Op == gc.OREGISTER || n1.Op == gc.OINDREG {
- reg[n.Val.U.Reg]++
- }
- return
- }
-
- gc.Tempname(n1, n.Type)
- cgen(n, n1)
- if n.Type.Width <= int64(gc.Widthptr) || gc.Isfloat[n.Type.Etype] {
- n2 := *n1
- regalloc(n1, n.Type, rg)
- gmove(&n2, n1)
- }
-}
-
-func mfree(n *gc.Node) {
- if n.Op == gc.OREGISTER {
- regfree(n)
- }
-}
-
-/*
- * generate:
- * res = n;
- * simplifies and calls gmove.
- *
- * TODO:
- * sudoaddable
- */
-func cgen(n *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\ncgen-n", n)
- gc.Dump("cgen-res", res)
- }
-
- if n == nil || n.Type == nil {
- gc.Fatal("cgen: n nil")
- }
- if res == nil || res.Type == nil {
- gc.Fatal("cgen: res nil")
- }
-
- switch n.Op {
- case gc.OSLICE,
- gc.OSLICEARR,
- gc.OSLICESTR,
- gc.OSLICE3,
- gc.OSLICE3ARR:
- if res.Op != gc.ONAME || res.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_slice(n, &n1)
- cgen(&n1, res)
- } else {
- gc.Cgen_slice(n, res)
- }
- return
-
- case gc.OEFACE:
- if res.Op != gc.ONAME || res.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_eface(n, &n1)
- cgen(&n1, res)
- } else {
- gc.Cgen_eface(n, res)
- }
- return
- }
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- }
-
- // function calls on both sides? introduce temporary
- if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- cgen(n, &n1)
- cgen(&n1, res)
- return
- }
-
- // structs etc get handled specially
- if gc.Isfat(n.Type) {
- if n.Type.Width < 0 {
- gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
- }
- sgen(n, res, n.Type.Width)
- return
- }
-
- // update addressability for string, slice
- // can't do in walk because n->left->addable
- // changes if n->left is an escaping local variable.
- switch n.Op {
- case gc.OSPTR,
- gc.OLEN:
- if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
- n.Addable = n.Left.Addable
- }
-
- case gc.OCAP:
- if gc.Isslice(n.Left.Type) {
- n.Addable = n.Left.Addable
- }
-
- case gc.OITAB:
- n.Addable = n.Left.Addable
- }
-
- // if both are addressable, move
- if n.Addable != 0 && res.Addable != 0 {
- gmove(n, res)
- return
- }
-
- // if both are not addressable, use a temporary.
- if n.Addable == 0 && res.Addable == 0 {
- // could use regalloc here sometimes,
- // but have to check for ullman >= UINF.
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
-
- cgen(n, &n1)
- cgen(&n1, res)
- return
- }
-
- // if result is not addressable directly but n is,
- // compute its address and then store via the address.
- if res.Addable == 0 {
- var n1 gc.Node
- igen(res, &n1, nil)
- cgen(n, &n1)
- regfree(&n1)
- return
- }
-
- // complex types
- if gc.Complexop(n, res) {
- gc.Complexgen(n, res)
- return
- }
-
- // otherwise, the result is addressable but n is not.
- // let's do some computation.
-
- // use ullman to pick operand to eval first.
- nl := n.Left
-
- nr := n.Right
- if nl != nil && nl.Ullman >= gc.UINF {
- if nr != nil && nr.Ullman >= gc.UINF {
- // both are hard
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
-
- cgen(nl, &n1)
- n2 := *n
- n2.Left = &n1
- cgen(&n2, res)
- return
- }
- }
-
- // 64-bit ops are hard on 32-bit machine.
- if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) {
- switch n.Op {
- // math goes to cgen64.
- case gc.OMINUS,
- gc.OCOM,
- gc.OADD,
- gc.OSUB,
- gc.OMUL,
- gc.OLROT,
- gc.OLSH,
- gc.ORSH,
- gc.OAND,
- gc.OOR,
- gc.OXOR:
- cgen64(n, res)
-
- return
- }
- }
-
- if nl != nil && gc.Isfloat[n.Type.Etype] && gc.Isfloat[nl.Type.Etype] {
- cgen_float(n, res)
- return
- }
-
- var a int
- switch n.Op {
- default:
- gc.Dump("cgen", n)
- gc.Fatal("cgen %v", gc.Oconv(int(n.Op), 0))
-
- case gc.OREAL,
- gc.OIMAG,
- gc.OCOMPLEX:
- gc.Fatal("unexpected complex")
- return
-
- // these call bgen to get a bool value
- case gc.OOROR,
- gc.OANDAND,
- gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OLE,
- gc.OGE,
- gc.OGT,
- gc.ONOT:
- p1 := gc.Gbranch(obj.AJMP, nil, 0)
-
- p2 := gc.Pc
- gmove(gc.Nodbool(true), res)
- p3 := gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, gc.Pc)
- bgen(n, true, 0, p2)
- gmove(gc.Nodbool(false), res)
- gc.Patch(p3, gc.Pc)
- return
-
- case gc.OPLUS:
- cgen(nl, res)
- return
-
- case gc.OMINUS,
- gc.OCOM:
- a := optoas(int(n.Op), nl.Type)
- // unary
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
-
- cgen(nl, &n1)
- gins(a, nil, &n1)
- gmove(&n1, res)
- return
-
- // symmetric binary
- case gc.OAND,
- gc.OOR,
- gc.OXOR,
- gc.OADD,
- gc.OMUL:
- a = optoas(int(n.Op), nl.Type)
-
- if a == x86.AIMULB {
- cgen_bmul(int(n.Op), nl, nr, res)
- break
- }
-
- // symmetric binary
- if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
- r := nl
- nl = nr
- nr = r
- }
- goto abop
-
- // asymmetric binary
- case gc.OSUB:
- a = optoas(int(n.Op), nl.Type)
-
- goto abop
-
- case gc.OHMUL:
- cgen_hmul(nl, nr, res)
-
- case gc.OCONV:
- if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
- cgen(nl, res)
- break
- }
-
- var n2 gc.Node
- gc.Tempname(&n2, n.Type)
- var n1 gc.Node
- mgen(nl, &n1, res)
- gmove(&n1, &n2)
- gmove(&n2, res)
- mfree(&n1)
-
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OINDEX,
- gc.OIND,
- gc.ONAME: // PHEAP or PPARAMREF var
- var n1 gc.Node
- igen(n, &n1, res)
-
- gmove(&n1, res)
- regfree(&n1)
-
- case gc.OITAB:
- var n1 gc.Node
- igen(nl, &n1, res)
- n1.Type = gc.Ptrto(gc.Types[gc.TUINTPTR])
- gmove(&n1, res)
- regfree(&n1)
-
- // pointer is the first word of string or slice.
- case gc.OSPTR:
- if gc.Isconst(nl, gc.CTSTR) {
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
- p1 := gins(x86.ALEAL, nil, &n1)
- gc.Datastring(nl.Val.U.Sval, &p1.From)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- var n1 gc.Node
- igen(nl, &n1, res)
- n1.Type = n.Type
- gmove(&n1, res)
- regfree(&n1)
-
- case gc.OLEN:
- if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
- // map has len in the first 32-bit word.
- // a zero pointer means zero length
- var n1 gc.Node
- gc.Tempname(&n1, gc.Types[gc.Tptr])
-
- cgen(nl, &n1)
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], nil)
- gmove(&n1, &n2)
- n1 = n2
-
- gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
- gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
-
- n2 = n1
- n2.Op = gc.OINDREG
- n2.Type = gc.Types[gc.TINT32]
- gmove(&n2, &n1)
-
- gc.Patch(p1, gc.Pc)
-
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
- // both slice and string have len one pointer into the struct.
- var n1 gc.Node
- igen(nl, &n1, res)
-
- n1.Type = gc.Types[gc.TUINT32]
- n1.Xoffset += int64(gc.Array_nel)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
-
- case gc.OCAP:
- if gc.Istype(nl.Type, gc.TCHAN) {
- // chan has cap in the second 32-bit word.
- // a zero pointer means zero length
- var n1 gc.Node
- gc.Tempname(&n1, gc.Types[gc.Tptr])
-
- cgen(nl, &n1)
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], nil)
- gmove(&n1, &n2)
- n1 = n2
-
- gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
- gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
-
- n2 = n1
- n2.Op = gc.OINDREG
- n2.Xoffset = 4
- n2.Type = gc.Types[gc.TINT32]
- gmove(&n2, &n1)
-
- gc.Patch(p1, gc.Pc)
-
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- if gc.Isslice(nl.Type) {
- var n1 gc.Node
- igen(nl, &n1, res)
- n1.Type = gc.Types[gc.TUINT32]
- n1.Xoffset += int64(gc.Array_cap)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
-
- case gc.OADDR:
- agen(nl, res)
-
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
- cgen_callret(n, res)
-
- case gc.OCALLINTER:
- cgen_callinter(n, res, 0)
- cgen_callret(n, res)
-
- case gc.OCALLFUNC:
- cgen_call(n, 0)
- cgen_callret(n, res)
-
- case gc.OMOD,
- gc.ODIV:
- cgen_div(int(n.Op), nl, nr, res)
-
- case gc.OLSH,
- gc.ORSH,
- gc.OLROT:
- cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
- }
-
- return
-
-abop: // asymmetric binary
- if gc.Smallintconst(nr) {
- var n1 gc.Node
- mgen(nl, &n1, res)
- var n2 gc.Node
- regalloc(&n2, nl.Type, &n1)
- gmove(&n1, &n2)
- gins(a, nr, &n2)
- gmove(&n2, res)
- regfree(&n2)
- mfree(&n1)
- } else if nl.Ullman >= nr.Ullman {
- var nt gc.Node
- gc.Tempname(&nt, nl.Type)
- cgen(nl, &nt)
- var n2 gc.Node
- mgen(nr, &n2, nil)
- var n1 gc.Node
- regalloc(&n1, nl.Type, res)
- gmove(&nt, &n1)
- gins(a, &n2, &n1)
- gmove(&n1, res)
- regfree(&n1)
- mfree(&n2)
- } else {
- var n2 gc.Node
- regalloc(&n2, nr.Type, res)
- cgen(nr, &n2)
- var n1 gc.Node
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
- gins(a, &n2, &n1)
- regfree(&n2)
- gmove(&n1, res)
- regfree(&n1)
- }
-
- return
-}
-
/*
* generate an addressable node in res, containing the value of n.
* n is an array index, and might be any size; res width is <= 32-bit.
* returns Prog* to patch to panic call.
*/
-func igenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
+func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
if !gc.Is64(n.Type) {
if n.Addable != 0 {
// nothing to do.
*res = *n
} else {
gc.Tempname(res, gc.Types[gc.TUINT32])
- cgen(n, res)
+ gc.Cgen(n, res)
}
return nil
var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT64])
- cgen(n, &tmp)
+ gc.Cgen(n, &tmp)
var lo gc.Node
var hi gc.Node
split64(&tmp, &lo, &hi)
gc.Tempname(res, gc.Types[gc.TUINT32])
gmove(&lo, res)
- if bounded != 0 {
+ if bounded {
splitclean()
return nil
}
return gc.Gbranch(x86.AJNE, nil, +1)
}
-/*
- * address gen
- * res = &n;
- * The generated code checks that the result is not nil.
- */
-func agen(n *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nagen-res", res)
- gc.Dump("agen-r", n)
- }
-
- if n == nil || n.Type == nil || res == nil || res.Type == nil {
- gc.Fatal("agen")
- }
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- }
-
- if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
- // Use of a nil interface or nil slice.
- // Create a temporary we can take the address of and read.
- // The generated code is just going to panic, so it need not
- // be terribly efficient. See issue 3670.
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
-
- gc.Gvardef(&n1)
- clearfat(&n1)
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], res)
- gins(x86.ALEAL, &n1, &n2)
- gmove(&n2, res)
- regfree(&n2)
- return
- }
-
- // addressable var is easy
- if n.Addable != 0 {
- if n.Op == gc.OREGISTER {
- gc.Fatal("agen OREGISTER")
- }
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
- gins(x86.ALEAL, n, &n1)
- gmove(&n1, res)
- regfree(&n1)
- return
- }
-
- // let's compute
- nl := n.Left
-
- nr := n.Right
-
- switch n.Op {
- default:
- gc.Fatal("agen %v", gc.Oconv(int(n.Op), 0))
-
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
- cgen_aret(n, res)
-
- case gc.OCALLINTER:
- cgen_callinter(n, res, 0)
- cgen_aret(n, res)
-
- case gc.OCALLFUNC:
- cgen_call(n, 0)
- cgen_aret(n, res)
-
- case gc.OSLICE,
- gc.OSLICEARR,
- gc.OSLICESTR,
- gc.OSLICE3,
- gc.OSLICE3ARR:
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_slice(n, &n1)
- agen(&n1, res)
-
- case gc.OEFACE:
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_eface(n, &n1)
- agen(&n1, res)
-
- case gc.OINDEX:
- var p2 *obj.Prog // to be patched to panicindex.
- w := uint32(n.Type.Width)
- bounded := gc.Debug['B'] != 0 || n.Bounded
- var n3 gc.Node
- var tmp gc.Node
- var n1 gc.Node
- if nr.Addable != 0 {
- // Generate &nl first, and move nr into register.
- if !gc.Isconst(nl, gc.CTSTR) {
- igen(nl, &n3, res)
- }
- if !gc.Isconst(nr, gc.CTINT) {
- p2 = igenindex(nr, &tmp, bool2int(bounded))
- regalloc(&n1, tmp.Type, nil)
- gmove(&tmp, &n1)
- }
- } else if nl.Addable != 0 {
- // Generate nr first, and move &nl into register.
- if !gc.Isconst(nr, gc.CTINT) {
- p2 = igenindex(nr, &tmp, bool2int(bounded))
- regalloc(&n1, tmp.Type, nil)
- gmove(&tmp, &n1)
- }
-
- if !gc.Isconst(nl, gc.CTSTR) {
- igen(nl, &n3, res)
- }
- } else {
- p2 = igenindex(nr, &tmp, bool2int(bounded))
- nr = &tmp
- if !gc.Isconst(nl, gc.CTSTR) {
- igen(nl, &n3, res)
- }
- regalloc(&n1, tmp.Type, nil)
- gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
- }
-
- // For fixed array we really want the pointer in n3.
- var n2 gc.Node
- if gc.Isfixedarray(nl.Type) {
- regalloc(&n2, gc.Types[gc.Tptr], &n3)
- agen(&n3, &n2)
- regfree(&n3)
- n3 = n2
- }
-
- // &a[0] is in n3 (allocated in res)
- // i is in n1 (if not constant)
- // len(a) is in nlen (if needed)
- // w is width
-
- // constant index
- if gc.Isconst(nr, gc.CTINT) {
- if gc.Isconst(nl, gc.CTSTR) {
- gc.Fatal("constant string constant index") // front end should handle
- }
- v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
- if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- if gc.Debug['B'] == 0 && !n.Bounded {
- nlen := n3
- nlen.Type = gc.Types[gc.TUINT32]
- nlen.Xoffset += int64(gc.Array_nel)
- gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
- gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &nlen, &n2)
- p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
- ginscall(gc.Panicindex, -1)
- gc.Patch(p1, gc.Pc)
- }
- }
-
- // Load base pointer in n2 = n3.
- regalloc(&n2, gc.Types[gc.Tptr], &n3)
-
- n3.Type = gc.Types[gc.Tptr]
- n3.Xoffset += int64(gc.Array_array)
- gmove(&n3, &n2)
- regfree(&n3)
- if v*uint64(w) != 0 {
- gc.Nodconst(&n1, gc.Types[gc.Tptr], int64(v*uint64(w)))
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, &n2)
- }
-
- gmove(&n2, res)
- regfree(&n2)
- break
- }
-
- // i is in register n1, extend to 32 bits.
- t := gc.Types[gc.TUINT32]
-
- if gc.Issigned[n1.Type.Etype] {
- t = gc.Types[gc.TINT32]
- }
-
- regalloc(&n2, t, &n1) // i
- gmove(&n1, &n2)
- regfree(&n1)
-
- if gc.Debug['B'] == 0 && !n.Bounded {
- // check bounds
- t := gc.Types[gc.TUINT32]
-
- var nlen gc.Node
- if gc.Isconst(nl, gc.CTSTR) {
- gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval)))
- } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- nlen = n3
- nlen.Type = t
- nlen.Xoffset += int64(gc.Array_nel)
- } else {
- gc.Nodconst(&nlen, t, nl.Type.Bound)
- }
-
- gins(optoas(gc.OCMP, t), &n2, &nlen)
- p1 := gc.Gbranch(optoas(gc.OLT, t), nil, +1)
- if p2 != nil {
- gc.Patch(p2, gc.Pc)
- }
- ginscall(gc.Panicindex, -1)
- gc.Patch(p1, gc.Pc)
- }
-
- if gc.Isconst(nl, gc.CTSTR) {
- regalloc(&n3, gc.Types[gc.Tptr], res)
- p1 := gins(x86.ALEAL, nil, &n3)
- gc.Datastring(nl.Val.U.Sval, &p1.From)
- p1.From.Scale = 1
- p1.From.Index = n2.Val.U.Reg
- goto indexdone
- }
-
- // Load base pointer in n3.
- regalloc(&tmp, gc.Types[gc.Tptr], &n3)
-
- if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- n3.Type = gc.Types[gc.Tptr]
- n3.Xoffset += int64(gc.Array_array)
- gmove(&n3, &tmp)
- }
-
- regfree(&n3)
- n3 = tmp
-
- if w == 0 {
- // nothing to do
- } else if w == 1 || w == 2 || w == 4 || w == 8 {
- // LEAL (n3)(n2*w), n3
- p1 := gins(x86.ALEAL, &n2, &n3)
-
- p1.From.Scale = int16(w)
- p1.From.Type = obj.TYPE_MEM
- p1.From.Index = p1.From.Reg
- p1.From.Reg = p1.To.Reg
- } else {
- gc.Nodconst(&tmp, gc.Types[gc.TUINT32], int64(w))
- gins(optoas(gc.OMUL, gc.Types[gc.TUINT32]), &tmp, &n2)
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
- }
-
- indexdone:
- gmove(&n3, res)
- regfree(&n2)
- regfree(&n3)
-
- // should only get here with names in this func.
- case gc.ONAME:
- if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
- gc.Dump("bad agen", n)
- gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
- }
-
- // should only get here for heap vars or paramref
- if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
- gc.Dump("bad agen", n)
- gc.Fatal("agen: bad ONAME class %#x", n.Class)
- }
-
- cgen(n.Heapaddr, res)
- if n.Xoffset != 0 {
- var n1 gc.Node
- gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
- }
-
- case gc.OIND:
- cgen(nl, res)
- gc.Cgen_checknil(res)
-
- case gc.ODOT:
- agen(nl, res)
- if n.Xoffset != 0 {
- var n1 gc.Node
- gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
- }
-
- case gc.ODOTPTR:
- t := nl.Type
- if !gc.Isptr[t.Etype] {
- gc.Fatal("agen: not ptr %v", gc.Nconv(n, 0))
- }
- cgen(nl, res)
- gc.Cgen_checknil(res)
- if n.Xoffset != 0 {
- var n1 gc.Node
- gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
- }
- }
-}
-
-/*
- * generate:
- * newreg = &n;
- * res = newreg
- *
- * on exit, a has been changed to be *newreg.
- * caller must regfree(a).
- * The generated code checks that the result is not *nil.
- */
-func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nigen-n", n)
- }
-
- switch n.Op {
- case gc.ONAME:
- if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
- break
- }
- *a = *n
- return
-
- // Increase the refcount of the register so that igen's caller
- // has to call regfree.
- case gc.OINDREG:
- if n.Val.U.Reg != x86.REG_SP {
- reg[n.Val.U.Reg]++
- }
- *a = *n
- return
-
- case gc.ODOT:
- igen(n.Left, a, res)
- a.Xoffset += n.Xoffset
- a.Type = n.Type
- return
-
- case gc.ODOTPTR:
- switch n.Left.Op {
- // igen-able nodes.
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- var n1 gc.Node
- igen(n.Left, &n1, res)
-
- regalloc(a, gc.Types[gc.Tptr], &n1)
- gmove(&n1, a)
- regfree(&n1)
-
- default:
- regalloc(a, gc.Types[gc.Tptr], res)
- cgen(n.Left, a)
- }
-
- gc.Cgen_checknil(a)
- a.Op = gc.OINDREG
- a.Xoffset += n.Xoffset
- a.Type = n.Type
- return
-
- case gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- switch n.Op {
- case gc.OCALLFUNC:
- cgen_call(n, 0)
-
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
-
- case gc.OCALLINTER:
- cgen_callinter(n, nil, 0)
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
- *a = gc.Node{}
- a.Op = gc.OINDREG
- a.Val.U.Reg = x86.REG_SP
- a.Addable = 1
- a.Xoffset = fp.Width
- a.Type = n.Type
- return
-
- // Index of fixed-size array by constant can
- // put the offset in the addressing.
- // Could do the same for slice except that we need
- // to use the real index for the bounds checking.
- case gc.OINDEX:
- if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] && gc.Isfixedarray(n.Left.Left.Type)) {
- if gc.Isconst(n.Right, gc.CTINT) {
- // Compute &a.
- if !gc.Isptr[n.Left.Type.Etype] {
- igen(n.Left, a, res)
- } else {
- var n1 gc.Node
- igen(n.Left, &n1, res)
- gc.Cgen_checknil(&n1)
- regalloc(a, gc.Types[gc.Tptr], res)
- gmove(&n1, a)
- regfree(&n1)
- a.Op = gc.OINDREG
- }
-
- // Compute &a[i] as &a + i*width.
- a.Type = n.Type
-
- a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
- return
- }
- }
- }
-
- // release register for now, to avoid
- // confusing tempname.
- if res != nil && res.Op == gc.OREGISTER {
- reg[res.Val.U.Reg]--
- }
- var n1 gc.Node
- gc.Tempname(&n1, gc.Types[gc.Tptr])
- agen(n, &n1)
- if res != nil && res.Op == gc.OREGISTER {
- reg[res.Val.U.Reg]++
- }
- regalloc(a, gc.Types[gc.Tptr], res)
- gmove(&n1, a)
- a.Op = gc.OINDREG
- a.Type = n.Type
-}
-
-/*
- * branch gen
- * if(n == true) goto to;
- */
-func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nbgen", n)
- }
-
- if n == nil {
- n = gc.Nodbool(true)
- }
-
- if n.Ninit != nil {
- gc.Genlist(n.Ninit)
- }
-
- if n.Type == nil {
- gc.Convlit(&n, gc.Types[gc.TBOOL])
- if n.Type == nil {
- return
- }
- }
-
- et := int(n.Type.Etype)
- if et != gc.TBOOL {
- gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
- gc.Patch(gins(obj.AEND, nil, nil), to)
- return
- }
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- if n.Ninit != nil {
- gc.Genlist(n.Ninit)
- }
- }
-
- nl := n.Left
- var nr *gc.Node
-
- if nl != nil && gc.Isfloat[nl.Type.Etype] {
- bgen_float(n, bool2int(true_), likely, to)
- return
- }
-
- switch n.Op {
- default:
- goto def
-
- // need to ask if it is bool?
- case gc.OLITERAL:
- if !true_ == (n.Val.U.Bval == 0) {
- gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
- }
- return
-
- case gc.ONAME:
- if n.Addable == 0 {
- goto def
- }
- var n1 gc.Node
- gc.Nodconst(&n1, n.Type, 0)
- gins(optoas(gc.OCMP, n.Type), n, &n1)
- a := x86.AJNE
- if !true_ {
- a = x86.AJEQ
- }
- gc.Patch(gc.Gbranch(a, n.Type, likely), to)
- return
-
- case gc.OANDAND,
- gc.OOROR:
- if (n.Op == gc.OANDAND) == true_ {
- p1 := gc.Gbranch(obj.AJMP, nil, 0)
- p2 := gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, gc.Pc)
- bgen(n.Left, !true_, -likely, p2)
- bgen(n.Right, !true_, -likely, p2)
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, to)
- gc.Patch(p2, gc.Pc)
- } else {
- bgen(n.Left, true_, likely, to)
- bgen(n.Right, true_, likely, to)
- }
-
- return
-
- case gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OGT,
- gc.OLE,
- gc.OGE:
- nr = n.Right
- if nr == nil || nr.Type == nil {
- return
- }
- fallthrough
-
- case gc.ONOT: // unary
- nl = n.Left
-
- if nl == nil || nl.Type == nil {
- return
- }
- }
-
- switch n.Op {
- case gc.ONOT:
- bgen(nl, !true_, likely, to)
-
- case gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OGT,
- gc.OLE,
- gc.OGE:
- a := int(n.Op)
- if !true_ {
- a = gc.Brcom(a)
- true_ = !true_
- }
-
- // make simplest on right
- if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
- a = gc.Brrev(a)
- r := nl
- nl = nr
- nr = r
- }
-
- if gc.Isslice(nl.Type) {
- // front end should only leave cmp to literal nil
- if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
- gc.Yyerror("illegal slice comparison")
- break
- }
-
- a = optoas(a, gc.Types[gc.Tptr])
- var n1 gc.Node
- igen(nl, &n1, nil)
- n1.Xoffset += int64(gc.Array_array)
- n1.Type = gc.Types[gc.Tptr]
- var tmp gc.Node
- gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
- gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
- gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
- regfree(&n1)
- break
- }
-
- if gc.Isinter(nl.Type) {
- // front end should only leave cmp to literal nil
- if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
- gc.Yyerror("illegal interface comparison")
- break
- }
-
- a = optoas(a, gc.Types[gc.Tptr])
- var n1 gc.Node
- igen(nl, &n1, nil)
- n1.Type = gc.Types[gc.Tptr]
- var tmp gc.Node
- gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
- gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
- gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
- regfree(&n1)
- break
- }
-
- if gc.Iscomplex[nl.Type.Etype] {
- gc.Complexbool(a, nl, nr, true_, likely, to)
- break
- }
-
- if gc.Is64(nr.Type) {
- if nl.Addable == 0 || gc.Isconst(nl, gc.CTINT) {
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
- cgen(nl, &n1)
- nl = &n1
- }
-
- if nr.Addable == 0 {
- var n2 gc.Node
- gc.Tempname(&n2, nr.Type)
- cgen(nr, &n2)
- nr = &n2
- }
-
- cmp64(nl, nr, a, likely, to)
- break
- }
-
- var n2 gc.Node
- if nr.Ullman >= gc.UINF {
- if nl.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
- cgen(nl, &n1)
- nl = &n1
- }
-
- if nr.Addable == 0 {
- var tmp gc.Node
- gc.Tempname(&tmp, nr.Type)
- cgen(nr, &tmp)
- nr = &tmp
- }
-
- var n2 gc.Node
- regalloc(&n2, nr.Type, nil)
- cgen(nr, &n2)
- nr = &n2
- goto cmp
- }
-
- if nl.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
- cgen(nl, &n1)
- nl = &n1
- }
-
- if gc.Smallintconst(nr) {
- gins(optoas(gc.OCMP, nr.Type), nl, nr)
- gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
- break
- }
-
- if nr.Addable == 0 {
- var tmp gc.Node
- gc.Tempname(&tmp, nr.Type)
- cgen(nr, &tmp)
- nr = &tmp
- }
-
- regalloc(&n2, nr.Type, nil)
- gmove(nr, &n2)
- nr = &n2
-
- cmp:
- gins(optoas(gc.OCMP, nr.Type), nl, nr)
- gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
-
- if nl.Op == gc.OREGISTER {
- regfree(nl)
- }
- regfree(nr)
- }
-
- return
-
-def:
- var n1 gc.Node
- regalloc(&n1, n.Type, nil)
- cgen(n, &n1)
- var n2 gc.Node
- gc.Nodconst(&n2, n.Type, 0)
- gins(optoas(gc.OCMP, n.Type), &n1, &n2)
- a := x86.AJNE
- if !true_ {
- a = x86.AJEQ
- }
- gc.Patch(gc.Gbranch(a, n.Type, likely), to)
- regfree(&n1)
- return
-}
-
-/*
- * n is on stack, either local variable
- * or return value from function call.
- * return n's offset from SP.
- */
-func stkof(n *gc.Node) int32 {
- switch n.Op {
- case gc.OINDREG:
- return int32(n.Xoffset)
-
- case gc.ODOT:
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- break
- }
- off := stkof(n.Left)
- if off == -1000 || off == 1000 {
- return off
- }
- return int32(int64(off) + n.Xoffset)
-
- case gc.OINDEX:
- t := n.Left.Type
- if !gc.Isfixedarray(t) {
- break
- }
- off := stkof(n.Left)
- if off == -1000 || off == 1000 {
- return off
- }
- if gc.Isconst(n.Right, gc.CTINT) {
- return int32(int64(off) + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval))
- }
- return 1000
-
- case gc.OCALLMETH,
- gc.OCALLINTER,
- gc.OCALLFUNC:
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- t = t.Type
- }
-
- var flist gc.Iter
- t = gc.Structfirst(&flist, gc.Getoutarg(t))
- if t != nil {
- return int32(t.Width)
- }
- }
-
- // botch - probably failing to recognize address
- // arithmetic on the above. eg INDEX and DOT
- return -1000
-}
-
-/*
- * struct gen
- * memmove(&res, &n, w);
- */
-func sgen(n *gc.Node, res *gc.Node, w int64) {
- if gc.Debug['g'] != 0 {
- fmt.Printf("\nsgen w=%d\n", w)
- gc.Dump("r", n)
- gc.Dump("res", res)
- }
-
- if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
- gc.Fatal("sgen UINF")
- }
-
- if w < 0 || int64(int32(w)) != w {
- gc.Fatal("sgen copy %d", w)
- }
-
- if w == 0 {
- // evaluate side effects only.
- var tdst gc.Node
- gc.Tempname(&tdst, gc.Types[gc.Tptr])
-
- agen(res, &tdst)
- agen(n, &tdst)
- return
- }
-
- // If copying .args, that's all the results, so record definition sites
- // for them for the liveness analysis.
- if res.Op == gc.ONAME && res.Sym.Name == ".args" {
- for l := gc.Curfn.Dcl; l != nil; l = l.Next {
- if l.N.Class == gc.PPARAMOUT {
- gc.Gvardef(l.N)
- }
- }
- }
-
- // Avoid taking the address for simple enough types.
- if gc.Componentgen(n, res) {
- return
- }
-
- // offset on the stack
- osrc := stkof(n)
-
- odst := stkof(res)
-
- if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
- // osrc and odst both on stack, and at least one is in
- // an unknown position. Could generate code to test
- // for forward/backward copy, but instead just copy
- // to a temporary location first.
- var tsrc gc.Node
- gc.Tempname(&tsrc, n.Type)
-
- sgen(n, &tsrc, w)
- sgen(&tsrc, res, w)
- return
- }
-
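+// stackcopy copies w bytes from n to res; osrc and odst are the
+// stack offsets of n and res as computed by the caller (sgen and
+// stkof now live in the portable gc package, which calls this
+// through gc.Thearch.Stackcopy).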
+func stackcopy(n, res *gc.Node, osrc, odst, w int64) {
var dst gc.Node
gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI)
var src gc.Node
var tdst gc.Node
gc.Tempname(&tdst, gc.Types[gc.Tptr])
if n.Addable == 0 {
- agen(n, &tsrc)
+ gc.Agen(n, &tsrc)
}
if res.Addable == 0 {
- agen(res, &tdst)
+ gc.Agen(res, &tdst)
}
if n.Addable != 0 {
- agen(n, &src)
+ gc.Agen(n, &src)
} else {
gmove(&tsrc, &src)
}
}
if res.Addable != 0 {
- agen(res, &dst)
+ gc.Agen(res, &dst)
} else {
gmove(&tdst, &dst)
}
gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
case gc.OMINUS:
- cgen(n.Left, res)
+ gc.Cgen(n.Left, res)
var hi1 gc.Node
var lo1 gc.Node
split64(res, &lo1, &hi1)
return
case gc.OCOM:
- cgen(n.Left, res)
+ gc.Cgen(n.Left, res)
var lo1 gc.Node
var hi1 gc.Node
split64(res, &lo1, &hi1)
if l.Addable == 0 {
var t1 gc.Node
gc.Tempname(&t1, l.Type)
- cgen(l, &t1)
+ gc.Cgen(l, &t1)
l = &t1
}
if r != nil && r.Addable == 0 {
var t2 gc.Node
gc.Tempname(&t2, r.Type)
- cgen(r, &t2)
+ gc.Cgen(r, &t2)
r = &t2
}
// let's call the next two EX and FX.
case gc.OMUL:
var ex gc.Node
- regalloc(&ex, gc.Types[gc.TPTR32], nil)
+ gc.Regalloc(&ex, gc.Types[gc.TPTR32], nil)
var fx gc.Node
- regalloc(&fx, gc.Types[gc.TPTR32], nil)
+ gc.Regalloc(&fx, gc.Types[gc.TPTR32], nil)
// load args into DX:AX and EX:CX.
gins(x86.AMOVL, &lo1, &ax)
gins(x86.AADDL, &fx, &dx)
gc.Patch(p2, gc.Pc)
- regfree(&ex)
- regfree(&fx)
+ gc.Regfree(&ex)
+ gc.Regfree(&fx)
// We only rotate by a constant c in [0,64).
// if c >= 32:
if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
gins(x86.ACMPL, &hi1, &hi2)
} else {
- regalloc(&rr, gc.Types[gc.TINT32], nil)
+ gc.Regalloc(&rr, gc.Types[gc.TINT32], nil)
gins(x86.AMOVL, &hi1, &rr)
gins(x86.ACMPL, &rr, &hi2)
- regfree(&rr)
+ gc.Regfree(&rr)
}
var br *obj.Prog
if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
gins(x86.ACMPL, &lo1, &lo2)
} else {
- regalloc(&rr, gc.Types[gc.TINT32], nil)
+ gc.Regalloc(&rr, gc.Types[gc.TINT32], nil)
gins(x86.AMOVL, &lo1, &rr)
gins(x86.ACMPL, &rr, &lo2)
- regfree(&rr)
+ gc.Regfree(&rr)
}
// jump again
gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = x86.REGSP
gc.Thearch.REGCTXT = x86.REGCTXT
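+ // Fixed registers and allocatable register ranges consumed by the
+ // portable code generator and register allocator.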
+ gc.Thearch.REGCALLX = x86.REG_BX
+ gc.Thearch.REGCALLX2 = x86.REG_AX
+ gc.Thearch.REGRETURN = x86.REG_AX
+ gc.Thearch.REGMIN = x86.REG_AX
+ gc.Thearch.REGMAX = x86.REG_DI
+ gc.Thearch.FREGMIN = x86.REG_X0
+ gc.Thearch.FREGMAX = x86.REG_X7
gc.Thearch.MAXWIDTH = MAXWIDTH
- gc.Thearch.Anyregalloc = anyregalloc
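+ // The back end no longer tracks register allocation itself
+ // (ginit/gclean/anyregalloc are gone); it only tells the portable
+ // allocator which registers are reserved.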
+ gc.Thearch.ReservedRegs = resvd
+
gc.Thearch.Betypeinit = betypeinit
- gc.Thearch.Bgen = bgen
- gc.Thearch.Cgen = cgen
- gc.Thearch.Cgen_call = cgen_call
- gc.Thearch.Cgen_callinter = cgen_callinter
- gc.Thearch.Cgen_ret = cgen_ret
+ gc.Thearch.Bgen_float = bgen_float
+ gc.Thearch.Cgen64 = cgen64
+ gc.Thearch.Cgen_bmul = cgen_bmul
+ gc.Thearch.Cgen_float = cgen_float
+ gc.Thearch.Cgen_hmul = cgen_hmul
+ gc.Thearch.Cgen_shift = cgen_shift
gc.Thearch.Clearfat = clearfat
+ gc.Thearch.Cmp64 = cmp64
gc.Thearch.Defframe = defframe
+ gc.Thearch.Dodiv = cgen_div
gc.Thearch.Excise = excise
gc.Thearch.Expandchecks = expandchecks
- gc.Thearch.Gclean = gclean
- gc.Thearch.Ginit = ginit
gc.Thearch.Gins = gins
- gc.Thearch.Ginscall = ginscall
+ gc.Thearch.Ginscon = ginscon
+ gc.Thearch.Ginsnop = ginsnop
gc.Thearch.Gmove = gmove
- gc.Thearch.Igen = igen
+ gc.Thearch.Igenindex = igenindex
gc.Thearch.Linkarchinit = linkarchinit
gc.Thearch.Peep = peep
gc.Thearch.Proginfo = proginfo
- gc.Thearch.Regalloc = regalloc
- gc.Thearch.Regfree = regfree
gc.Thearch.Regtyp = regtyp
gc.Thearch.Sameaddr = sameaddr
gc.Thearch.Smallindir = smallindir
gc.Thearch.Stackaddr = stackaddr
+ gc.Thearch.Stackcopy = stackcopy
+ gc.Thearch.Sudoaddable = sudoaddable
+ gc.Thearch.Sudoclean = sudoclean
gc.Thearch.Excludedregs = excludedregs
gc.Thearch.RtoB = RtoB
gc.Thearch.FtoB = FtoB
// NOTE: Must use agen, not igen, so that optimizer sees address
// being taken. We are not writing on field boundaries.
var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], nil)
+ gc.Regalloc(&n1, gc.Types[gc.Tptr], nil)
- agen(nl, &n1)
+ gc.Agen(nl, &n1)
n1.Op = gc.OINDREG
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
n1.Xoffset++
}
- regfree(&n1)
+ gc.Regfree(&n1)
return
}
var n1 gc.Node
gc.Nodreg(&n1, gc.Types[gc.Tptr], x86.REG_DI)
- agen(nl, &n1)
+ gc.Agen(nl, &n1)
gconreg(x86.AMOVL, 0, x86.REG_AX)
if q > 128 || (q >= 4 && gc.Nacl) {
}
}
-/*
- * generate:
- * call f
- * proc=-1 normal call but no return
- * proc=0 normal call
- * proc=1 goroutine run in new proc
- * proc=2 defer call save away stack
- * proc=3 normal call to C pointer (not Go func value)
-*/
-func ginscall(f *gc.Node, proc int) {
- if f.Type != nil {
- extra := int32(0)
- if proc == 1 || proc == 2 {
- extra = 2 * int32(gc.Widthptr)
- }
- gc.Setmaxarg(f.Type, extra)
- }
-
- switch proc {
- default:
- gc.Fatal("ginscall: bad proc %d", proc)
-
- case 0, // normal call
- -1: // normal call but no return
- if f.Op == gc.ONAME && f.Class == gc.PFUNC {
- if f == gc.Deferreturn {
- // Deferred calls will appear to be returning to
- // the CALL deferreturn(SB) that we are about to emit.
- // However, the stack trace code will show the line
- // of the instruction byte before the return PC.
- // To avoid that being an unrelated instruction,
- // insert an x86 NOP so that we will have the right line number.
- // x86 NOP 0x90 is really XCHG AX, AX; use that description
- // because the NOP pseudo-instruction will be removed by
- // the linker.
- var reg gc.Node
- gc.Nodreg(®, gc.Types[gc.TINT], x86.REG_AX)
-
- gins(x86.AXCHGL, ®, ®)
- }
-
- p := gins(obj.ACALL, nil, f)
- gc.Afunclit(&p.To, f)
- if proc == -1 || gc.Noreturn(p) {
- gins(obj.AUNDEF, nil, nil)
- }
- break
- }
-
- var reg gc.Node
- gc.Nodreg(®, gc.Types[gc.Tptr], x86.REG_DX)
- var r1 gc.Node
- gc.Nodreg(&r1, gc.Types[gc.Tptr], x86.REG_BX)
- gmove(f, ®)
- reg.Op = gc.OINDREG
- gmove(®, &r1)
- reg.Op = gc.OREGISTER
- gins(obj.ACALL, ®, &r1)
-
- case 3: // normal call of c function pointer
- gins(obj.ACALL, nil, f)
-
- case 1, // call in new proc (go)
- 2: // deferred call (defer)
- var stk gc.Node
-
- stk.Op = gc.OINDREG
- stk.Val.U.Reg = x86.REG_SP
- stk.Xoffset = 0
-
- // size of arguments at 0(SP)
- var con gc.Node
- gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
-
- gins(x86.AMOVL, &con, &stk)
-
- // FuncVal* at 4(SP)
- stk.Xoffset = int64(gc.Widthptr)
-
- gins(x86.AMOVL, f, &stk)
-
- if proc == 1 {
- ginscall(gc.Newproc, 0)
- } else {
- ginscall(gc.Deferproc, 0)
- }
- if proc == 2 {
- var reg gc.Node
- gc.Nodreg(®, gc.Types[gc.TINT32], x86.REG_AX)
- gins(x86.ATESTL, ®, ®)
- p := gc.Gbranch(x86.AJEQ, nil, +1)
- cgen_ret(nil)
- gc.Patch(p, gc.Pc)
- }
- }
-}
-
-/*
- * n is call to interface method.
- * generate res = n.
- */
-func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
- i := n.Left
- if i.Op != gc.ODOTINTER {
- gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
- }
-
- f := i.Right // field
- if f.Op != gc.ONAME {
- gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
- }
-
- i = i.Left // interface
-
- if i.Addable == 0 {
- var tmpi gc.Node
- gc.Tempname(&tmpi, i.Type)
- cgen(i, &tmpi)
- i = &tmpi
- }
-
- gc.Genlist(n.List) // assign the args
-
- // i is now addable, prepare an indirected
- // register to hold its address.
- var nodi gc.Node
- igen(i, &nodi, res) // REG = &inter
-
- var nodsp gc.Node
- gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], x86.REG_SP)
-
- nodsp.Xoffset = 0
- if proc != 0 {
- nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
- }
- nodi.Type = gc.Types[gc.Tptr]
- nodi.Xoffset += int64(gc.Widthptr)
- cgen(&nodi, &nodsp) // {0 or 8}(SP) = 4(REG) -- i.data
-
- var nodo gc.Node
- regalloc(&nodo, gc.Types[gc.Tptr], res)
-
- nodi.Type = gc.Types[gc.Tptr]
- nodi.Xoffset -= int64(gc.Widthptr)
- cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
- regfree(&nodi)
-
- var nodr gc.Node
- regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
- if n.Left.Xoffset == gc.BADWIDTH {
- gc.Fatal("cgen_callinter: badwidth")
- }
- gc.Cgen_checknil(&nodo)
- nodo.Op = gc.OINDREG
- nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
-
- if proc == 0 {
- // plain call: use direct c function pointer - more efficient
- cgen(&nodo, &nodr) // REG = 20+offset(REG) -- i.tab->fun[f]
- proc = 3
- } else {
- // go/defer. generate go func value.
- gins(x86.ALEAL, &nodo, &nodr) // REG = &(20+offset(REG)) -- i.tab->fun[f]
- }
-
- nodr.Type = n.Left.Type
- ginscall(&nodr, proc)
-
- regfree(&nodr)
- regfree(&nodo)
-}
-
-/*
- * generate function call;
- * proc=0 normal call
- * proc=1 goroutine run in new proc
- * proc=2 defer call save away stack
- */
-func cgen_call(n *gc.Node, proc int) {
- if n == nil {
- return
- }
-
- var afun gc.Node
- if n.Left.Ullman >= gc.UINF {
- // if name involves a fn call
- // precompute the address of the fn
- gc.Tempname(&afun, gc.Types[gc.Tptr])
-
- cgen(n.Left, &afun)
- }
-
- gc.Genlist(n.List) // assign the args
- t := n.Left.Type
-
- // call tempname pointer
- if n.Left.Ullman >= gc.UINF {
- var nod gc.Node
- regalloc(&nod, gc.Types[gc.Tptr], nil)
- gc.Cgen_as(&nod, &afun)
- nod.Type = t
- ginscall(&nod, proc)
- regfree(&nod)
- return
- }
-
- // call pointer
- if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
- var nod gc.Node
- regalloc(&nod, gc.Types[gc.Tptr], nil)
- gc.Cgen_as(&nod, n.Left)
- nod.Type = t
- ginscall(&nod, proc)
- regfree(&nod)
- return
- }
-
- // call direct
- n.Left.Method = 1
-
- ginscall(n.Left, proc)
-}
-
-/*
- * call to n has already been generated.
- * generate:
- * res = return value from call.
- */
-func cgen_callret(n *gc.Node, res *gc.Node) {
- t := n.Left.Type
- if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
- t = t.Type
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(t))
- if fp == nil {
- gc.Fatal("cgen_callret: nil")
- }
-
- var nod gc.Node
- nod.Op = gc.OINDREG
- nod.Val.U.Reg = x86.REG_SP
- nod.Addable = 1
-
- nod.Xoffset = fp.Width
- nod.Type = fp.Type
- gc.Cgen_as(res, &nod)
-}
-
-/*
- * call to n has already been generated.
- * generate:
- * res = &return value from call.
- */
-func cgen_aret(n *gc.Node, res *gc.Node) {
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- t = t.Type
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(t))
- if fp == nil {
- gc.Fatal("cgen_aret: nil")
- }
-
- var nod1 gc.Node
- nod1.Op = gc.OINDREG
- nod1.Val.U.Reg = x86.REG_SP
- nod1.Addable = 1
-
- nod1.Xoffset = fp.Width
- nod1.Type = fp.Type
-
- if res.Op != gc.OREGISTER {
- var nod2 gc.Node
- regalloc(&nod2, gc.Types[gc.Tptr], res)
- gins(x86.ALEAL, &nod1, &nod2)
- gins(x86.AMOVL, &nod2, res)
- regfree(&nod2)
- } else {
- gins(x86.ALEAL, &nod1, res)
- }
-}
-
-/*
- * generate return.
- * n->left is assignments to return values.
- */
-func cgen_ret(n *gc.Node) {
- if n != nil {
- gc.Genlist(n.List) // copy out args
- }
- if gc.Hasdefer != 0 {
- ginscall(gc.Deferreturn, 0)
- }
- gc.Genlist(gc.Curfn.Exit)
- p := gins(obj.ARET, nil, nil)
- if n != nil && n.Op == gc.ORETJMP {
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Linksym(n.Left.Sym)
- }
-}
-
/*
* generate division.
* caller must set:
gc.Tempname(&t3, t0)
var t4 gc.Node
gc.Tempname(&t4, t0)
- cgen(nl, &t3)
- cgen(nr, &t4)
+ gc.Cgen(nl, &t3)
+ gc.Cgen(nr, &t4)
// Convert.
gmove(&t3, &t1)
gmove(&t4, &t2)
} else {
- cgen(nl, &t1)
- cgen(nr, &t2)
+ gc.Cgen(nl, &t1)
+ gc.Cgen(nr, &t2)
}
var n1 gc.Node
if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
- regalloc(&n1, t, res)
+ gc.Regalloc(&n1, t, res)
} else {
- regalloc(&n1, t, nil)
+ gc.Regalloc(&n1, t, nil)
}
gmove(&t2, &n1)
gmove(&t1, ax)
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
- ginscall(panicdiv, -1)
+ gc.Ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
}
gins(optoas(gc.OEXTEND, t), nil, nil)
}
gins(optoas(op, t), &n1, nil)
- regfree(&n1)
+ gc.Regfree(&n1)
if op == gc.ODIV {
gmove(ax, res)
gmove(x, oldx)
}
- regalloc(x, t, x)
+ gc.Regalloc(x, t, x)
}
func restx(x *gc.Node, oldx *gc.Node) {
- regfree(x)
+ gc.Regfree(x)
if oldx.Op != 0 {
x.Type = gc.Types[gc.TINT32]
if nr.Op == gc.OLITERAL {
var n2 gc.Node
gc.Tempname(&n2, nl.Type)
- cgen(nl, &n2)
+ gc.Cgen(nl, &n2)
var n1 gc.Node
- regalloc(&n1, nl.Type, res)
+ gc.Regalloc(&n1, nl.Type, res)
gmove(&n2, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc >= uint64(nl.Type.Width*8) {
gins(a, nr, &n1)
}
gmove(&n1, res)
- regfree(&n1)
+ gc.Regfree(&n1)
return
}
n1 = nt
} else {
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
- regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
+ gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
}
var n2 gc.Node
if gc.Samereg(&cx, res) {
- regalloc(&n2, nl.Type, nil)
+ gc.Regalloc(&n2, nl.Type, nil)
} else {
- regalloc(&n2, nl.Type, res)
+ gc.Regalloc(&n2, nl.Type, res)
}
if nl.Ullman >= nr.Ullman {
- cgen(nl, &n2)
- cgen(nr, &n1)
+ gc.Cgen(nl, &n2)
+ gc.Cgen(nr, &n1)
} else {
- cgen(nr, &n1)
- cgen(nl, &n2)
+ gc.Cgen(nr, &n1)
+ gc.Cgen(nl, &n2)
}
// test and fix up large shifts
// delayed reg alloc
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
- regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
+ gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
var lo gc.Node
var hi gc.Node
split64(&nt, &lo, &hi)
// delayed reg alloc
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
- regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
+ gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
var lo gc.Node
var hi gc.Node
split64(&nt, &lo, &hi)
gmove(&n2, res)
- regfree(&n1)
- regfree(&n2)
+ gc.Regfree(&n1)
+ gc.Regfree(&n2)
}
/*
* there is no 2-operand byte multiply instruction so
* we do a full-width multiplication and truncate afterwards.
*/
-func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
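+ // Returns false when the operation is not the byte-multiply case,
+ // letting the caller fall back to the general multiply path.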
+ if optoas(op, nl.Type) != x86.AIMULB {
+ return false
+ }
+
// copy from byte to full registers
t := gc.Types[gc.TUINT32]
var nt gc.Node
gc.Tempname(&nt, nl.Type)
- cgen(nl, &nt)
+ gc.Cgen(nl, &nt)
var n1 gc.Node
- regalloc(&n1, t, res)
- cgen(nr, &n1)
+ gc.Regalloc(&n1, t, res)
+ gc.Cgen(nr, &n1)
var n2 gc.Node
- regalloc(&n2, t, nil)
+ gc.Regalloc(&n2, t, nil)
gmove(&nt, &n2)
a := optoas(op, t)
gins(a, &n2, &n1)
- regfree(&n2)
+ gc.Regfree(&n2)
gmove(&n1, res)
- regfree(&n1)
+ gc.Regfree(&n1)
+
+ return true
}
/*
// gen nl in n1.
gc.Tempname(&n1, t)
- cgen(nl, &n1)
+ gc.Cgen(nl, &n1)
// gen nr in n2.
- regalloc(&n2, t, res)
+ gc.Regalloc(&n2, t, res)
- cgen(nr, &n2)
+ gc.Cgen(nr, &n2)
// multiply.
gc.Nodreg(&ax, t, x86.REG_AX)
gmove(&n2, &ax)
gins(a, &n1, nil)
- regfree(&n2)
+ gc.Regfree(&n2)
if t.Width == 1 {
// byte multiply behaves differently.
gmove(gc.Nodbool(true), res)
p3 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
- bgen(n, true, 0, p2)
+ gc.Bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
return
case gc.OPLUS:
- cgen(nl, res)
+ gc.Cgen(nl, res)
return
case gc.OCONV:
if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
- cgen(nl, res)
+ gc.Cgen(nl, res)
return
}
var n2 gc.Node
gc.Tempname(&n2, n.Type)
var n1 gc.Node
- mgen(nl, &n1, res)
+ gc.Mgen(nl, &n1, res)
gmove(&n1, &n2)
gmove(&n2, res)
- mfree(&n1)
+ gc.Mfree(&n1)
return
}
if nr != nil {
// binary
if nl.Ullman >= nr.Ullman {
- cgen(nl, &f0)
+ gc.Cgen(nl, &f0)
if nr.Addable != 0 {
gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
} else {
- cgen(nr, &f0)
+ gc.Cgen(nr, &f0)
gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
}
} else {
- cgen(nr, &f0)
+ gc.Cgen(nr, &f0)
if nl.Addable != 0 {
gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
} else {
- cgen(nl, &f0)
+ gc.Cgen(nl, &f0)
gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
}
}
}
// unary
- cgen(nl, &f0)
+ gc.Cgen(nl, &f0)
if n.Op != gc.OCONV && n.Op != gc.OPLUS {
gins(foptoas(int(n.Op), n.Type, 0), nil, nil)
if nl.Ullman >= nr.Ullman {
var nt gc.Node
gc.Tempname(&nt, nl.Type)
- cgen(nl, &nt)
+ gc.Cgen(nl, &nt)
var n2 gc.Node
- mgen(nr, &n2, nil)
+ gc.Mgen(nr, &n2, nil)
var n1 gc.Node
- regalloc(&n1, nl.Type, res)
+ gc.Regalloc(&n1, nl.Type, res)
gmove(&nt, &n1)
gins(a, &n2, &n1)
gmove(&n1, res)
- regfree(&n1)
- mfree(&n2)
+ gc.Regfree(&n1)
+ gc.Mfree(&n2)
} else {
var n2 gc.Node
- regalloc(&n2, nr.Type, res)
- cgen(nr, &n2)
+ gc.Regalloc(&n2, nr.Type, res)
+ gc.Cgen(nr, &n2)
var n1 gc.Node
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
+ gc.Regalloc(&n1, nl.Type, nil)
+ gc.Cgen(nl, &n1)
gins(a, &n2, &n1)
- regfree(&n2)
+ gc.Regfree(&n2)
gmove(&n1, res)
- regfree(&n1)
+ gc.Regfree(&n1)
}
return
if nl.Addable == 0 {
var n1 gc.Node
gc.Tempname(&n1, nl.Type)
- cgen(nl, &n1)
+ gc.Cgen(nl, &n1)
nl = &n1
}
if nr.Addable == 0 {
var tmp gc.Node
gc.Tempname(&tmp, nr.Type)
- cgen(nr, &tmp)
+ gc.Cgen(nr, &tmp)
nr = &tmp
}
var n2 gc.Node
- regalloc(&n2, nr.Type, nil)
+ gc.Regalloc(&n2, nr.Type, nil)
gmove(nr, &n2)
nr = &n2
if nl.Op != gc.OREGISTER {
var n3 gc.Node
- regalloc(&n3, nl.Type, nil)
+ gc.Regalloc(&n3, nl.Type, nil)
gmove(nl, &n3)
nl = &n3
}
gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
if nl.Op == gc.OREGISTER {
- regfree(nl)
+ gc.Regfree(nl)
}
- regfree(nr)
+ gc.Regfree(nr)
goto ret
} else {
goto x87
et = gc.Simsimtype(nr.Type)
if et == gc.TFLOAT64 {
if nl.Ullman > nr.Ullman {
- cgen(nl, &tmp)
- cgen(nr, &tmp)
+ gc.Cgen(nl, &tmp)
+ gc.Cgen(nr, &tmp)
gins(x86.AFXCHD, &tmp, &n2)
} else {
- cgen(nr, &tmp)
- cgen(nl, &tmp)
+ gc.Cgen(nr, &tmp)
+ gc.Cgen(nl, &tmp)
}
gins(x86.AFUCOMIP, &tmp, &n2)
var t2 gc.Node
gc.Tempname(&t2, gc.Types[gc.TFLOAT32])
- cgen(nr, &t1)
- cgen(nl, &t2)
+ gc.Cgen(nr, &t1)
+ gc.Cgen(nl, &t2)
gmove(&t2, &tmp)
gins(x86.AFCOMFP, &t1, &tmp)
gins(x86.AFSTSW, nil, &ax)
p2.To.Offset = 0
}
}
+
+// addr += index*width if possible.
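+// x86 addressing modes scale an index only by 1, 2, 4, or 8, so other
+// widths report false.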
+func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
+ switch width {
+ case 1, 2, 4, 8:
+ p1 := gins(x86.ALEAL, index, addr)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Scale = int16(width)
+ p1.From.Index = p1.From.Reg
+ p1.From.Reg = p1.To.Reg
+ return true
+ }
+ return false
+}
x86.REG_CX, // for shift
x86.REG_DX, // for divide
x86.REG_SP, // for stack
-
- x86.REG_BL, // because REG_BX can be allocated
- x86.REG_BH,
-}
-
-func ginit() {
- for i := 0; i < len(reg); i++ {
- reg[i] = 1
- }
- for i := x86.REG_AX; i <= x86.REG_DI; i++ {
- reg[i] = 0
- }
- for i := x86.REG_X0; i <= x86.REG_X7; i++ {
- reg[i] = 0
- }
- for i := 0; i < len(resvd); i++ {
- reg[resvd[i]]++
- }
-}
-
-var regpc [x86.MAXREG]uint32
-
-func gclean() {
- for i := 0; i < len(resvd); i++ {
- reg[resvd[i]]--
- }
-
- for i := x86.REG_AX; i <= x86.REG_DI; i++ {
- if reg[i] != 0 {
- gc.Yyerror("reg %v left allocated at %x", obj.Rconv(i), regpc[i])
- }
- }
- for i := x86.REG_X0; i <= x86.REG_X7; i++ {
- if reg[i] != 0 {
- gc.Yyerror("reg %v left allocated\n", obj.Rconv(i))
- }
- }
-}
-
-func anyregalloc() bool {
- var j int
-
- for i := x86.REG_AX; i <= x86.REG_DI; i++ {
- if reg[i] == 0 {
- goto ok
- }
- for j = 0; j < len(resvd); j++ {
- if resvd[j] == i {
- goto ok
- }
- }
- return true
- ok:
- }
-
- for i := x86.REG_X0; i <= x86.REG_X7; i++ {
- if reg[i] != 0 {
- return true
- }
- }
- return false
-}
-
-/*
- * allocate register of type t, leave in n.
- * if o != N, o is desired fixed register.
- * caller must regfree(n).
- */
-func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
- if t == nil {
- gc.Fatal("regalloc: t nil")
- }
- et := int(gc.Simtype[t.Etype])
-
- var i int
- switch et {
- case gc.TINT64,
- gc.TUINT64:
- gc.Fatal("regalloc64")
-
- case gc.TINT8,
- gc.TUINT8,
- gc.TINT16,
- gc.TUINT16,
- gc.TINT32,
- gc.TUINT32,
- gc.TPTR32,
- gc.TPTR64,
- gc.TBOOL:
- if o != nil && o.Op == gc.OREGISTER {
- i = int(o.Val.U.Reg)
- if i >= x86.REG_AX && i <= x86.REG_DI {
- goto out
- }
- }
-
- for i = x86.REG_AX; i <= x86.REG_DI; i++ {
- if reg[i] == 0 {
- goto out
- }
- }
-
- fmt.Printf("registers allocated at\n")
- for i := x86.REG_AX; i <= x86.REG_DI; i++ {
- fmt.Printf("\t%v\t%#x\n", obj.Rconv(i), regpc[i])
- }
- gc.Fatal("out of fixed registers")
- goto err
-
- case gc.TFLOAT32,
- gc.TFLOAT64:
- if gc.Use_sse == 0 {
- i = x86.REG_F0
- goto out
- }
-
- if o != nil && o.Op == gc.OREGISTER {
- i = int(o.Val.U.Reg)
- if i >= x86.REG_X0 && i <= x86.REG_X7 {
- goto out
- }
- }
-
- for i = x86.REG_X0; i <= x86.REG_X7; i++ {
- if reg[i] == 0 {
- goto out
- }
- }
- fmt.Printf("registers allocated at\n")
- for i := x86.REG_X0; i <= x86.REG_X7; i++ {
- fmt.Printf("\t%v\t%#x\n", obj.Rconv(i), regpc[i])
- }
- gc.Fatal("out of floating registers")
- }
-
- gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0))
-
-err:
- gc.Nodreg(n, t, 0)
- return
-
-out:
- if i == x86.REG_SP {
- fmt.Printf("alloc SP\n")
- }
- if reg[i] == 0 {
- regpc[i] = uint32(obj.Getcallerpc(&n))
- if i == x86.REG_AX || i == x86.REG_CX || i == x86.REG_DX || i == x86.REG_SP {
- gc.Dump("regalloc-o", o)
- gc.Fatal("regalloc %v", obj.Rconv(i))
- }
- }
-
- reg[i]++
- gc.Nodreg(n, t, i)
-}
-
-func regfree(n *gc.Node) {
- if n.Op == gc.ONAME {
- return
- }
- if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
- gc.Fatal("regfree: not a register")
- }
- i := int(n.Val.U.Reg)
- if i == x86.REG_SP {
- return
- }
- if i < 0 || i >= len(reg) {
- gc.Fatal("regfree: reg out of range")
- }
- if reg[i] <= 0 {
- gc.Fatal("regfree: reg not allocated")
- }
- reg[i]--
- if reg[i] == 0 && (i == x86.REG_AX || i == x86.REG_CX || i == x86.REG_DX || i == x86.REG_SP) {
- gc.Fatal("regfree %v", obj.Rconv(i))
- }
}
/*
gins(as, &n1, &n2)
}
+/*
+ * generate
+ * as $c, n
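+ *
+ * The 32-bit constant always fits in an x86 immediate, so no scratch
+ * register is needed here (the RISC ports may need one).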
+ */
+func ginscon(as int, c int64, n2 *gc.Node) {
+ var n1 gc.Node
+ gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
+ gins(as, &n1, n2)
+}
+
/*
* swap node contents
*/
default:
var n1 gc.Node
if !dotaddable(n, &n1) {
- igen(n, &n1, nil)
+ gc.Igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
case gc.ONAME:
if n.Class == gc.PPARAMREF {
var n1 gc.Node
- cgen(n.Heapaddr, &n1)
+ gc.Cgen(n.Heapaddr, &n1)
sclean[nsclean-1] = n1
n = &n1
}
}
nsclean--
if sclean[nsclean].Op != gc.OEMPTY {
- regfree(&sclean[nsclean])
+ gc.Regfree(&sclean[nsclean])
}
}
// requires register source
rsrc:
- regalloc(&r1, f.Type, t)
+ gc.Regalloc(&r1, f.Type, t)
gmove(f, &r1)
gins(a, &r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
// requires register destination
rdst:
{
- regalloc(&r1, t.Type, t)
+ gc.Regalloc(&r1, t.Type, t)
gins(a, f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
}
// requires register intermediate
hard:
- regalloc(&r1, cvt, t)
+ gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
}
// requires register intermediate
hard:
- regalloc(&r1, cvt, t)
+ gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
// requires memory intermediate
// requires register intermediate
hard:
- regalloc(&r1, cvt, t)
+ gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
// requires memory intermediate
// requires register intermediate
hard:
- regalloc(&r1, cvt, t)
+ gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
// requires memory intermediate
// requires register destination
rdst:
- regalloc(&r1, t.Type, t)
+ gc.Regalloc(&r1, t.Type, t)
gins(a, f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
}
gc.Fatal("gins MOVSD into F0")
}
+ if as == x86.AMOVL && f != nil && f.Op == gc.OADDR && f.Left.Op == gc.ONAME && f.Left.Class != gc.PEXTERN && f.Left.Class != gc.PFUNC {
+ // Turn MOVL $xxx(FP/SP) into LEAL xxx.
+ // These should be equivalent but most of the backend
+ // only expects to see LEAL, because that's what we had
+ // historically generated. Various hidden assumptions are baked in by now.
+ as = x86.ALEAL
+ f = f.Left
+ }
+
switch as {
case x86.AMOVB,
x86.AMOVW,
return p
}
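+
+// ginsnop generates a real machine no-op: x86 NOP 0x90 is really
+// XCHG AX, AX, and the explicit form is used because the NOP
+// pseudo-instruction would be removed by the linker.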
+func ginsnop() {
+ var reg gc.Node
+ gc.Nodreg(®, gc.Types[gc.TINT], x86.REG_AX)
+ gins(x86.AXCHGL, ®, ®)
+}
+
func dotaddable(n *gc.Node, n1 *gc.Node) bool {
if n.Op != gc.ODOT {
return false
if regtyp(v) {
return true
}
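+ // gins now rewrites MOVL $auto/$param into LEAL, so TYPE_ADDR
+ // operands can refer to stack words just as TYPE_MEM operands do.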
- if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+ if (v.Type == obj.TYPE_MEM || v.Type == obj.TYPE_ADDR) && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
return true
}
if regtyp(v) {
return true
}
- if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+ if (v.Type == obj.TYPE_MEM || v.Type == obj.TYPE_ADDR) && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
return true
}
return true
}
if regtyp(v) {
- if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
+ if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Reg == v.Reg {
return true
}
if a.Index == v.Reg {
if regtyp(v) {
reg := int(v.Reg)
- if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
+ if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && int(a.Reg) == reg {
if (s.Reg == x86.REG_BP) && a.Index != obj.TYPE_NONE {
return 1 /* can't use BP-base with index */
}
"cmd/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
- "fmt"
)
-/*
- * peep.c
- */
-/*
- * generate:
- * res = n;
- * simplifies and calls gmove.
- */
-func cgen(n *gc.Node, res *gc.Node) {
- //print("cgen %N(%d) -> %N(%d)\n", n, n->addable, res, res->addable);
- if gc.Debug['g'] != 0 {
- gc.Dump("\ncgen-n", n)
- gc.Dump("cgen-res", res)
- }
-
- if n == nil || n.Type == nil {
- return
- }
-
- if res == nil || res.Type == nil {
- gc.Fatal("cgen: res nil")
- }
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- }
-
- switch n.Op {
- case gc.OSLICE,
- gc.OSLICEARR,
- gc.OSLICESTR,
- gc.OSLICE3,
- gc.OSLICE3ARR:
- if res.Op != gc.ONAME || res.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_slice(n, &n1)
- cgen(&n1, res)
- } else {
- gc.Cgen_slice(n, res)
- }
- return
-
- case gc.OEFACE:
- if res.Op != gc.ONAME || res.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_eface(n, &n1)
- cgen(&n1, res)
- } else {
- gc.Cgen_eface(n, res)
- }
- return
- }
-
- if n.Ullman >= gc.UINF {
- if n.Op == gc.OINDREG {
- gc.Fatal("cgen: this is going to misscompile")
- }
- if res.Ullman >= gc.UINF {
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- cgen(n, &n1)
- cgen(&n1, res)
- return
- }
- }
-
- if gc.Isfat(n.Type) {
- if n.Type.Width < 0 {
- gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
- }
- sgen(n, res, n.Type.Width)
- return
- }
-
- if res.Addable == 0 {
- if n.Ullman > res.Ullman {
- var n1 gc.Node
- regalloc(&n1, n.Type, res)
- cgen(n, &n1)
- if n1.Ullman > res.Ullman {
- gc.Dump("n1", &n1)
- gc.Dump("res", res)
- gc.Fatal("loop in cgen")
- }
-
- cgen(&n1, res)
- regfree(&n1)
- return
- }
-
- var f int
- if res.Ullman >= gc.UINF {
- goto gen
- }
-
- if gc.Complexop(n, res) {
- gc.Complexgen(n, res)
- return
- }
-
- f = 1 // gen thru register
- switch n.Op {
- case gc.OLITERAL:
- if gc.Smallintconst(n) {
- f = 0
- }
-
- case gc.OREGISTER:
- f = 0
- }
-
- if !gc.Iscomplex[n.Type.Etype] {
- a := optoas(gc.OAS, res.Type)
- var addr obj.Addr
- if sudoaddable(a, res, &addr) {
- var p1 *obj.Prog
- if f != 0 {
- var n2 gc.Node
- regalloc(&n2, res.Type, nil)
- cgen(n, &n2)
- p1 = gins(a, &n2, nil)
- regfree(&n2)
- } else {
- p1 = gins(a, n, nil)
- }
- p1.To = addr
- if gc.Debug['g'] != 0 {
- fmt.Printf("%v [ignore previous line]\n", p1)
- }
- sudoclean()
- return
- }
- }
-
- gen:
- var n1 gc.Node
- igen(res, &n1, nil)
- cgen(n, &n1)
- regfree(&n1)
- return
- }
-
- // update addressability for string, slice
- // can't do in walk because n->left->addable
- // changes if n->left is an escaping local variable.
- switch n.Op {
- case gc.OSPTR,
- gc.OLEN:
- if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
- n.Addable = n.Left.Addable
- }
-
- case gc.OCAP:
- if gc.Isslice(n.Left.Type) {
- n.Addable = n.Left.Addable
- }
-
- case gc.OITAB:
- n.Addable = n.Left.Addable
- }
-
- if gc.Complexop(n, res) {
- gc.Complexgen(n, res)
- return
- }
-
- // if both are addressable, move
- if n.Addable != 0 {
- if n.Op == gc.OREGISTER || res.Op == gc.OREGISTER {
- gmove(n, res)
- } else {
- var n1 gc.Node
- regalloc(&n1, n.Type, nil)
- gmove(n, &n1)
- cgen(&n1, res)
- regfree(&n1)
- }
-
- return
- }
-
- nl := n.Left
- nr := n.Right
-
- if nl != nil && nl.Ullman >= gc.UINF {
- if nr != nil && nr.Ullman >= gc.UINF {
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
- cgen(nl, &n1)
- n2 := *n
- n2.Left = &n1
- cgen(&n2, res)
- return
- }
- }
-
- if !gc.Iscomplex[n.Type.Etype] {
- a := optoas(gc.OAS, n.Type)
- var addr obj.Addr
- if sudoaddable(a, n, &addr) {
- if res.Op == gc.OREGISTER {
- p1 := gins(a, nil, res)
- p1.From = addr
- } else {
- var n2 gc.Node
- regalloc(&n2, n.Type, nil)
- p1 := gins(a, nil, &n2)
- p1.From = addr
- gins(a, &n2, res)
- regfree(&n2)
- }
-
- sudoclean()
- return
- }
- }
-
- // TODO(minux): we shouldn't reverse FP comparisons, but then we need to synthesize
- // OGE, OLE, and ONE ourselves.
- // if(nl != N && isfloat[n->type->etype] && isfloat[nl->type->etype]) goto flt;
-
- var a int
- switch n.Op {
- default:
- gc.Dump("cgen", n)
- gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
-
- // these call bgen to get a bool value
- case gc.OOROR,
- gc.OANDAND,
- gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OLE,
- gc.OGE,
- gc.OGT,
- gc.ONOT:
- p1 := gc.Gbranch(ppc64.ABR, nil, 0)
-
- p2 := gc.Pc
- gmove(gc.Nodbool(true), res)
- p3 := gc.Gbranch(ppc64.ABR, nil, 0)
- gc.Patch(p1, gc.Pc)
- bgen(n, true, 0, p2)
- gmove(gc.Nodbool(false), res)
- gc.Patch(p3, gc.Pc)
- return
-
- case gc.OPLUS:
- cgen(nl, res)
- return
-
- // unary
- case gc.OCOM:
- a := optoas(gc.OXOR, nl.Type)
-
- var n1 gc.Node
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
- var n2 gc.Node
- gc.Nodconst(&n2, nl.Type, -1)
- gins(a, &n2, &n1)
- gmove(&n1, res)
- regfree(&n1)
- return
-
- case gc.OMINUS:
- if gc.Isfloat[nl.Type.Etype] {
- nr = gc.Nodintconst(-1)
- gc.Convlit(&nr, n.Type)
- a = optoas(gc.OMUL, nl.Type)
- goto sbop
- }
-
- a := optoas(int(n.Op), nl.Type)
- // unary
- var n1 gc.Node
- regalloc(&n1, nl.Type, res)
-
- cgen(nl, &n1)
- gins(a, nil, &n1)
- gmove(&n1, res)
- regfree(&n1)
- return
-
- // symmetric binary
- case gc.OAND,
- gc.OOR,
- gc.OXOR,
- gc.OADD,
- gc.OMUL:
- a = optoas(int(n.Op), nl.Type)
-
- goto sbop
-
- // asymmetric binary
- case gc.OSUB:
- a = optoas(int(n.Op), nl.Type)
-
- goto abop
-
- case gc.OHMUL:
- cgen_hmul(nl, nr, res)
-
- case gc.OCONV:
- if n.Type.Width > nl.Type.Width {
- // If loading from memory, do conversion during load,
- // so as to avoid use of 8-bit register in, say, int(*byteptr).
- switch nl.Op {
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OINDEX,
- gc.OIND,
- gc.ONAME:
- var n1 gc.Node
- igen(nl, &n1, res)
- var n2 gc.Node
- regalloc(&n2, n.Type, res)
- gmove(&n1, &n2)
- gmove(&n2, res)
- regfree(&n2)
- regfree(&n1)
- return
- }
- }
-
- var n1 gc.Node
- regalloc(&n1, nl.Type, res)
- var n2 gc.Node
- regalloc(&n2, n.Type, &n1)
- cgen(nl, &n1)
-
- // if we do the conversion n1 -> n2 here
- // reusing the register, then gmove won't
- // have to allocate its own register.
- gmove(&n1, &n2)
-
- gmove(&n2, res)
- regfree(&n2)
- regfree(&n1)
-
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OINDEX,
- gc.OIND,
- gc.ONAME: // PHEAP or PPARAMREF var
- var n1 gc.Node
- igen(n, &n1, res)
-
- gmove(&n1, res)
- regfree(&n1)
-
- // interface table is first word of interface value
- case gc.OITAB:
- var n1 gc.Node
- igen(nl, &n1, res)
-
- n1.Type = n.Type
- gmove(&n1, res)
- regfree(&n1)
-
- // pointer is the first word of string or slice.
- case gc.OSPTR:
- if gc.Isconst(nl, gc.CTSTR) {
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
- p1 := gins(ppc64.AMOVD, nil, &n1)
- gc.Datastring(nl.Val.U.Sval, &p1.From)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- var n1 gc.Node
- igen(nl, &n1, res)
- n1.Type = n.Type
- gmove(&n1, res)
- regfree(&n1)
-
- case gc.OLEN:
- if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
- // map and chan have len in the first int-sized word.
- // a zero pointer means zero length
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
-
- cgen(nl, &n1)
-
- var n2 gc.Node
- gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
- gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
-
- n2 = n1
- n2.Op = gc.OINDREG
- n2.Type = gc.Types[gc.Simtype[gc.TINT]]
- gmove(&n2, &n1)
-
- gc.Patch(p1, gc.Pc)
-
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
- // both slice and string have len one pointer into the struct.
- // a zero pointer means zero length
- var n1 gc.Node
- igen(nl, &n1, res)
-
- n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
- n1.Xoffset += int64(gc.Array_nel)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
-
- case gc.OCAP:
- if gc.Istype(nl.Type, gc.TCHAN) {
- // chan has cap in the second int-sized word.
- // a zero pointer means zero length
- var n1 gc.Node
- regalloc(&n1, gc.Types[gc.Tptr], res)
-
- cgen(nl, &n1)
-
- var n2 gc.Node
- gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
- gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
-
- n2 = n1
- n2.Op = gc.OINDREG
- n2.Xoffset = int64(gc.Widthint)
- n2.Type = gc.Types[gc.Simtype[gc.TINT]]
- gmove(&n2, &n1)
-
- gc.Patch(p1, gc.Pc)
-
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- if gc.Isslice(nl.Type) {
- var n1 gc.Node
- igen(nl, &n1, res)
- n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
- n1.Xoffset += int64(gc.Array_cap)
- gmove(&n1, res)
- regfree(&n1)
- break
- }
-
- gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
-
- case gc.OADDR:
- if n.Bounded { // let race detector avoid nil checks
- gc.Disable_checknil++
- }
- agen(nl, res)
- if n.Bounded {
- gc.Disable_checknil--
- }
-
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
- cgen_callret(n, res)
-
- case gc.OCALLINTER:
- cgen_callinter(n, res, 0)
- cgen_callret(n, res)
-
- case gc.OCALLFUNC:
- cgen_call(n, 0)
- cgen_callret(n, res)
-
- case gc.OMOD,
- gc.ODIV:
- if gc.Isfloat[n.Type.Etype] {
- a = optoas(int(n.Op), nl.Type)
- goto abop
- }
-
- if nl.Ullman >= nr.Ullman {
- var n1 gc.Node
- regalloc(&n1, nl.Type, res)
- cgen(nl, &n1)
- cgen_div(int(n.Op), &n1, nr, res)
- regfree(&n1)
- } else {
- var n2 gc.Node
- if !gc.Smallintconst(nr) {
- regalloc(&n2, nr.Type, res)
- cgen(nr, &n2)
- } else {
- n2 = *nr
- }
-
- cgen_div(int(n.Op), nl, &n2, res)
- if n2.Op != gc.OLITERAL {
- regfree(&n2)
- }
- }
-
- case gc.OLSH,
- gc.ORSH,
- gc.OLROT:
- cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
- }
-
- return
-
- /*
- * put simplest on right - we'll generate into left
- * and then adjust it using the computation of right.
- * constants and variables have the same ullman
- * count, so look for constants specially.
- *
- * an integer constant we can use as an immediate
- * is simpler than a variable - we can use the immediate
- * in the adjustment instruction directly - so it goes
- * on the right.
- *
- * other constants, like big integers or floating point
- * constants, require a mov into a register, so those
- * might as well go on the left, so we can reuse that
- * register for the computation.
- */
-sbop: // symmetric binary
- if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
- r := nl
- nl = nr
- nr = r
- }
-
-abop: // asymmetric binary
- var n1 gc.Node
- var n2 gc.Node
- if nl.Ullman >= nr.Ullman {
- regalloc(&n1, nl.Type, res)
- cgen(nl, &n1)
-
- /*
- * This generates smaller code - it avoids a MOV - but it's
- * easily 10% slower due to not being able to
- * optimize/manipulate the move.
- * To see, run: go test -bench . crypto/md5
- * with and without.
- *
- if(sudoaddable(a, nr, &addr)) {
- p1 = gins(a, N, &n1);
- p1->from = addr;
- gmove(&n1, res);
- sudoclean();
- regfree(&n1);
- goto ret;
- }
- *
- */
- // TODO(minux): enable using constants directly in certain instructions.
- //if(smallintconst(nr))
- // n2 = *nr;
- //else {
- regalloc(&n2, nr.Type, nil)
-
- cgen(nr, &n2)
- } else //}
- {
- //if(smallintconst(nr))
- // n2 = *nr;
- //else {
- regalloc(&n2, nr.Type, res)
-
- cgen(nr, &n2)
-
- //}
- regalloc(&n1, nl.Type, nil)
-
- cgen(nl, &n1)
- }
-
- gins(a, &n2, &n1)
-
- // Normalize result for types smaller than word.
- if n.Type.Width < int64(gc.Widthreg) {
- switch n.Op {
- case gc.OADD,
- gc.OSUB,
- gc.OMUL,
- gc.OLSH:
- gins(optoas(gc.OAS, n.Type), &n1, &n1)
- }
- }
-
- gmove(&n1, res)
- regfree(&n1)
- if n2.Op != gc.OLITERAL {
- regfree(&n2)
- }
- return
-}
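The sbop/abop block above is driven by Ullman numbers, i.e. the Sethi-Ullman register count: generate the subtree that needs more registers first, so only one register stays live while the cheaper side is computed. A standalone sketch of the metric (illustration only, not the compiler's own code):

    package main

    import "fmt"

    type expr struct {
    	left, right *expr // both nil for a leaf
    }

    // ullman returns the registers needed to evaluate e without
    // spilling: a leaf needs 1; an interior node needs max(l, r)
    // when the children differ, or l+1 when they tie.
    func ullman(e *expr) int {
    	if e.left == nil && e.right == nil {
    		return 1
    	}
    	l, r := ullman(e.left), ullman(e.right)
    	switch {
    	case l == r:
    		return l + 1
    	case l > r:
    		return l
    	}
    	return r
    }

    func main() {
    	leaf := &expr{}
    	// (a+b)+(c+d): each half needs 2 registers, the whole needs 3.
    	e := &expr{&expr{leaf, leaf}, &expr{leaf, leaf}}
    	fmt.Println(ullman(e)) // 3
    }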
-
-/*
- * allocate a register (reusing res if possible) and generate
- * a = n
- * The caller must call regfree(a).
- */
-func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("cgenr-n", n)
- }
-
- if gc.Isfat(n.Type) {
- gc.Fatal("cgenr on fat node")
- }
-
- if n.Addable != 0 {
- regalloc(a, n.Type, res)
- gmove(n, a)
- return
- }
-
- switch n.Op {
- case gc.ONAME,
- gc.ODOT,
- gc.ODOTPTR,
- gc.OINDEX,
- gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- var n1 gc.Node
- igen(n, &n1, res)
- regalloc(a, gc.Types[gc.Tptr], &n1)
- gmove(&n1, a)
- regfree(&n1)
-
- default:
- regalloc(a, n.Type, res)
- cgen(n, a)
- }
-}
-
-/*
- * allocate a register (reusing res if possible) and generate
- * a = &n
- * The caller must call regfree(a).
- * The generated code checks that the result is not nil.
- */
-func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("agenr-n", n)
- }
-
- nl := n.Left
- nr := n.Right
-
- switch n.Op {
- case gc.ODOT,
- gc.ODOTPTR,
- gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- var n1 gc.Node
- igen(n, &n1, res)
- regalloc(a, gc.Types[gc.Tptr], &n1)
- agen(&n1, a)
- regfree(&n1)
-
- case gc.OIND:
- cgenr(n.Left, a, res)
- gc.Cgen_checknil(a)
-
- case gc.OINDEX:
- var p2 *obj.Prog // to be patched to panicindex.
- w := uint32(n.Type.Width)
-
- //bounded = debug['B'] || n->bounded;
- var n3 gc.Node
- var n1 gc.Node
- if nr.Addable != 0 {
- var tmp gc.Node
- if !gc.Isconst(nr, gc.CTINT) {
- gc.Tempname(&tmp, gc.Types[gc.TINT64])
- }
- if !gc.Isconst(nl, gc.CTSTR) {
- agenr(nl, &n3, res)
- }
- if !gc.Isconst(nr, gc.CTINT) {
- cgen(nr, &tmp)
- regalloc(&n1, tmp.Type, nil)
- gmove(&tmp, &n1)
- }
- } else if nl.Addable != 0 {
- if !gc.Isconst(nr, gc.CTINT) {
- var tmp gc.Node
- gc.Tempname(&tmp, gc.Types[gc.TINT64])
- cgen(nr, &tmp)
- regalloc(&n1, tmp.Type, nil)
- gmove(&tmp, &n1)
- }
-
- if !gc.Isconst(nl, gc.CTSTR) {
- agenr(nl, &n3, res)
- }
- } else {
- var tmp gc.Node
- gc.Tempname(&tmp, gc.Types[gc.TINT64])
- cgen(nr, &tmp)
- nr = &tmp
- if !gc.Isconst(nl, gc.CTSTR) {
- agenr(nl, &n3, res)
- }
- regalloc(&n1, tmp.Type, nil)
- gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
- }
-
- // &a is in &n3 (allocated in res)
- // i is in &n1 (if not constant)
- // w is width
-
- // constant index
- if gc.Isconst(nr, gc.CTINT) {
- if gc.Isconst(nl, gc.CTSTR) {
- gc.Fatal("constant string constant index")
- }
- v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
- if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- if gc.Debug['B'] == 0 && !n.Bounded {
- n1 = n3
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_nel)
- var n4 gc.Node
- regalloc(&n4, n1.Type, nil)
- gmove(&n1, &n4)
- ginscon2(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n4, int64(v))
- regfree(&n4)
- p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT64]), nil, +1)
- ginscall(gc.Panicindex, 0)
- gc.Patch(p1, gc.Pc)
- }
-
- n1 = n3
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_array)
- gmove(&n1, &n3)
- }
-
- if v*uint64(w) != 0 {
- ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*uint64(w)), &n3)
- }
-
- *a = n3
- break
- }
-
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.TINT64], &n1) // i
- gmove(&n1, &n2)
- regfree(&n1)
-
- var n4 gc.Node
- if gc.Debug['B'] == 0 && !n.Bounded {
- // check bounds
- if gc.Isconst(nl, gc.CTSTR) {
- gc.Nodconst(&n4, gc.Types[gc.TUINT64], int64(len(nl.Val.U.Sval)))
- } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- n1 = n3
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_nel)
- regalloc(&n4, gc.Types[gc.TUINT64], nil)
- gmove(&n1, &n4)
- } else {
- if nl.Type.Bound < (1<<15)-1 {
- gc.Nodconst(&n4, gc.Types[gc.TUINT64], nl.Type.Bound)
- } else {
- regalloc(&n4, gc.Types[gc.TUINT64], nil)
- p1 := gins(ppc64.AMOVD, nil, &n4)
- p1.From.Type = obj.TYPE_CONST
- p1.From.Offset = nl.Type.Bound
- }
- }
-
- gins(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n2, &n4)
- if n4.Op == gc.OREGISTER {
- regfree(&n4)
- }
- p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
- if p2 != nil {
- gc.Patch(p2, gc.Pc)
- }
- ginscall(gc.Panicindex, 0)
- gc.Patch(p1, gc.Pc)
- }
-
- if gc.Isconst(nl, gc.CTSTR) {
- regalloc(&n3, gc.Types[gc.Tptr], res)
- p1 := gins(ppc64.AMOVD, nil, &n3)
- gc.Datastring(nl.Val.U.Sval, &p1.From)
- p1.From.Type = obj.TYPE_ADDR
- } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
- n1 = n3
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_array)
- gmove(&n1, &n3)
- }
-
- if w == 0 {
- // nothing to do
- } else if w == 1 {
- /* w already scaled */
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
- /* else if(w == 2 || w == 4 || w == 8) {
- // TODO(minux): scale using shift
- } */
- } else {
- regalloc(&n4, gc.Types[gc.TUINT64], nil)
- gc.Nodconst(&n1, gc.Types[gc.TUINT64], int64(w))
- gmove(&n1, &n4)
- gins(optoas(gc.OMUL, gc.Types[gc.TUINT64]), &n4, &n2)
- gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
- regfree(&n4)
- }
-
- *a = n3
- regfree(&n2)
-
- default:
- regalloc(a, gc.Types[gc.Tptr], res)
- agen(n, a)
- }
-}
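The bounds checks agenr emits compare index and length as TUINT64, so a single unsigned branch rejects both negative and too-large indexes. The equivalent Go-level test (hypothetical helper, for illustration):

    package main

    import "fmt"

    // checkIndex mirrors the generated test: uint64(i) wraps a negative
    // index to a huge value, so one compare covers both failure modes.
    func checkIndex(i, length int64) bool {
    	return uint64(i) < uint64(length)
    }

    func main() {
    	fmt.Println(checkIndex(2, 5))  // true: in bounds
    	fmt.Println(checkIndex(-1, 5)) // false: uint64(-1) is 2^64-1
    	fmt.Println(checkIndex(5, 5))  // false: one past the end
    }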
-
-func ginsadd(as int, off int64, dst *gc.Node) {
- var n1 gc.Node
-
- regalloc(&n1, gc.Types[gc.Tptr], dst)
- gmove(dst, &n1)
- ginscon(as, off, &n1)
- gmove(&n1, dst)
- regfree(&n1)
-}
-
-/*
- * generate:
- * res = &n;
- * The generated code checks that the result is not nil.
- */
-func agen(n *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nagen-res", res)
- gc.Dump("agen-r", n)
- }
-
- if n == nil || n.Type == nil {
- return
- }
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- }
-
- if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
- // Use of a nil interface or nil slice.
- // Create a temporary we can take the address of and read.
- // The generated code is just going to panic, so it need not
- // be terribly efficient. See issue 3670.
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
-
- gc.Gvardef(&n1)
- clearfat(&n1)
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], res)
- var n3 gc.Node
- n3.Op = gc.OADDR
- n3.Left = &n1
- gins(ppc64.AMOVD, &n3, &n2)
- gmove(&n2, res)
- regfree(&n2)
- return
- }
-
- if n.Addable != 0 {
- var n1 gc.Node
- n1.Op = gc.OADDR
- n1.Left = n
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], res)
- gins(ppc64.AMOVD, &n1, &n2)
- gmove(&n2, res)
- regfree(&n2)
- return
- }
-
- nl := n.Left
-
- switch n.Op {
- default:
- gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
-
- // TODO(minux): 5g has this: Release res so that it is available for cgen_call.
- // Pick it up again after the call for OCALLMETH and OCALLFUNC.
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
-
- cgen_aret(n, res)
-
- case gc.OCALLINTER:
- cgen_callinter(n, res, 0)
- cgen_aret(n, res)
-
- case gc.OCALLFUNC:
- cgen_call(n, 0)
- cgen_aret(n, res)
-
- case gc.OSLICE,
- gc.OSLICEARR,
- gc.OSLICESTR,
- gc.OSLICE3,
- gc.OSLICE3ARR:
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_slice(n, &n1)
- agen(&n1, res)
-
- case gc.OEFACE:
- var n1 gc.Node
- gc.Tempname(&n1, n.Type)
- gc.Cgen_eface(n, &n1)
- agen(&n1, res)
-
- case gc.OINDEX:
- var n1 gc.Node
- agenr(n, &n1, res)
- gmove(&n1, res)
- regfree(&n1)
-
- // should only get here with names in this func.
- case gc.ONAME:
- if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
- gc.Dump("bad agen", n)
- gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
- }
-
- // should only get here for heap vars or paramref
- if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
- gc.Dump("bad agen", n)
- gc.Fatal("agen: bad ONAME class %#x", n.Class)
- }
-
- cgen(n.Heapaddr, res)
- if n.Xoffset != 0 {
- ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
- }
-
- case gc.OIND:
- cgen(nl, res)
- gc.Cgen_checknil(res)
-
- case gc.ODOT:
- agen(nl, res)
- if n.Xoffset != 0 {
- ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
- }
-
- case gc.ODOTPTR:
- cgen(nl, res)
- gc.Cgen_checknil(res)
- if n.Xoffset != 0 {
- ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
- }
- }
-}
-
-/*
- * generate:
- * newreg = &n;
- * res = newreg
- *
- * on exit, a has been changed to be *newreg.
- * caller must regfree(a).
- * The generated code checks that the result is not *nil.
- */
-func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nigen-n", n)
- }
-
- switch n.Op {
- case gc.ONAME:
- if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
- break
- }
- *a = *n
- return
-
- // Increase the refcount of the register so that igen's caller
- // has to call regfree.
- case gc.OINDREG:
- if n.Val.U.Reg != ppc64.REGSP {
- reg[n.Val.U.Reg]++
- }
- *a = *n
- return
-
- case gc.ODOT:
- igen(n.Left, a, res)
- a.Xoffset += n.Xoffset
- a.Type = n.Type
- fixlargeoffset(a)
- return
-
- case gc.ODOTPTR:
- cgenr(n.Left, a, res)
- gc.Cgen_checknil(a)
- a.Op = gc.OINDREG
- a.Xoffset += n.Xoffset
- a.Type = n.Type
- fixlargeoffset(a)
- return
-
- case gc.OCALLFUNC,
- gc.OCALLMETH,
- gc.OCALLINTER:
- switch n.Op {
- case gc.OCALLFUNC:
- cgen_call(n, 0)
-
- case gc.OCALLMETH:
- gc.Cgen_callmeth(n, 0)
-
- case gc.OCALLINTER:
- cgen_callinter(n, nil, 0)
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
- *a = gc.Node{}
- a.Op = gc.OINDREG
- a.Val.U.Reg = ppc64.REGSP
- a.Addable = 1
- a.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
- a.Type = n.Type
- return
-
- // Index of fixed-size array by constant can
- // put the offset in the addressing.
- // Could do the same for slice except that we need
- // to use the real index for the bounds checking.
- case gc.OINDEX:
- if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] && gc.Isfixedarray(n.Left.Left.Type)) {
- if gc.Isconst(n.Right, gc.CTINT) {
- // Compute &a.
- if !gc.Isptr[n.Left.Type.Etype] {
- igen(n.Left, a, res)
- } else {
- var n1 gc.Node
- igen(n.Left, &n1, res)
- gc.Cgen_checknil(&n1)
- regalloc(a, gc.Types[gc.Tptr], res)
- gmove(&n1, a)
- regfree(&n1)
- a.Op = gc.OINDREG
- }
-
- // Compute &a[i] as &a + i*width.
- a.Type = n.Type
-
- a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
- fixlargeoffset(a)
- return
- }
- }
- }
-
- agenr(n, a, res)
- a.Op = gc.OINDREG
- a.Type = n.Type
-}
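igen's OINDEX case folds a constant index straight into the addressing mode: &a[i] is just &a plus i*width, so no runtime add is needed. A small sketch making the arithmetic concrete (illustration only):

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	var a [4]int64
    	base := uintptr(unsafe.Pointer(&a))
    	elem := uintptr(unsafe.Pointer(&a[2]))
    	// offset of a[2] is index times element width
    	fmt.Println(elem-base == 2*unsafe.Sizeof(a[0])) // true
    }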
-
-/*
- * generate:
- * if(n == true) goto to;
- */
-func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
- if gc.Debug['g'] != 0 {
- gc.Dump("\nbgen", n)
- }
-
- if n == nil {
- n = gc.Nodbool(true)
- }
-
- if n.Ninit != nil {
- gc.Genlist(n.Ninit)
- }
-
- if n.Type == nil {
- gc.Convlit(&n, gc.Types[gc.TBOOL])
- if n.Type == nil {
- return
- }
- }
-
- et := int(n.Type.Etype)
- if et != gc.TBOOL {
- gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
- gc.Patch(gins(obj.AEND, nil, nil), to)
- return
- }
-
- var nr *gc.Node
-
- for n.Op == gc.OCONVNOP {
- n = n.Left
- if n.Ninit != nil {
- gc.Genlist(n.Ninit)
- }
- }
-
- var nl *gc.Node
- switch n.Op {
- default:
- var n1 gc.Node
- regalloc(&n1, n.Type, nil)
- cgen(n, &n1)
- var n2 gc.Node
- gc.Nodconst(&n2, n.Type, 0)
- gins(optoas(gc.OCMP, n.Type), &n1, &n2)
- a := ppc64.ABNE
- if !true_ {
- a = ppc64.ABEQ
- }
- gc.Patch(gc.Gbranch(a, n.Type, likely), to)
- regfree(&n1)
- return
-
- // need to ask if it is bool?
- case gc.OLITERAL:
- if !true_ == (n.Val.U.Bval == 0) {
- gc.Patch(gc.Gbranch(ppc64.ABR, nil, likely), to)
- }
- return
-
- case gc.OANDAND,
- gc.OOROR:
- if (n.Op == gc.OANDAND) == true_ {
- p1 := gc.Gbranch(obj.AJMP, nil, 0)
- p2 := gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, gc.Pc)
- bgen(n.Left, !true_, -likely, p2)
- bgen(n.Right, !true_, -likely, p2)
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, to)
- gc.Patch(p2, gc.Pc)
- } else {
- bgen(n.Left, true_, likely, to)
- bgen(n.Right, true_, likely, to)
- }
-
- return
-
- case gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OGT,
- gc.OLE,
- gc.OGE:
- nr = n.Right
- if nr == nil || nr.Type == nil {
- return
- }
- fallthrough
-
- case gc.ONOT: // unary
- nl = n.Left
-
- if nl == nil || nl.Type == nil {
- return
- }
- }
-
- switch n.Op {
- case gc.ONOT:
- bgen(nl, !true_, likely, to)
- return
-
- case gc.OEQ,
- gc.ONE,
- gc.OLT,
- gc.OGT,
- gc.OLE,
- gc.OGE:
- a := int(n.Op)
- if !true_ {
- if gc.Isfloat[nr.Type.Etype] {
- // brcom is not valid on floats when NaN is involved.
- p1 := gc.Gbranch(ppc64.ABR, nil, 0)
-
- p2 := gc.Gbranch(ppc64.ABR, nil, 0)
- gc.Patch(p1, gc.Pc)
- ll := n.Ninit // avoid re-genning ninit
- n.Ninit = nil
- bgen(n, true, -likely, p2)
- n.Ninit = ll
- gc.Patch(gc.Gbranch(ppc64.ABR, nil, 0), to)
- gc.Patch(p2, gc.Pc)
- return
- }
-
- a = gc.Brcom(a)
- true_ = !true_
- }
-
- // make simplest on right
- if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
- a = gc.Brrev(a)
- r := nl
- nl = nr
- nr = r
- }
-
- if gc.Isslice(nl.Type) {
- // front end should only leave cmp to literal nil
- if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
- gc.Yyerror("illegal slice comparison")
- break
- }
-
- a = optoas(a, gc.Types[gc.Tptr])
- var n1 gc.Node
- igen(nl, &n1, nil)
- n1.Xoffset += int64(gc.Array_array)
- n1.Type = gc.Types[gc.Tptr]
- var tmp gc.Node
- gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], &n1)
- gmove(&n1, &n2)
- gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
- regfree(&n2)
- gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
- regfree(&n1)
- break
- }
-
- if gc.Isinter(nl.Type) {
- // front end should only leave cmp to literal nil
- if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
- gc.Yyerror("illegal interface comparison")
- break
- }
-
- a = optoas(a, gc.Types[gc.Tptr])
- var n1 gc.Node
- igen(nl, &n1, nil)
- n1.Type = gc.Types[gc.Tptr]
- var tmp gc.Node
- gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
- var n2 gc.Node
- regalloc(&n2, gc.Types[gc.Tptr], &n1)
- gmove(&n1, &n2)
- gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
- regfree(&n2)
- gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
- regfree(&n1)
- break
- }
-
- if gc.Iscomplex[nl.Type.Etype] {
- gc.Complexbool(a, nl, nr, true_, likely, to)
- break
- }
-
- var n1 gc.Node
- var n2 gc.Node
- if nr.Ullman >= gc.UINF {
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
-
- var tmp gc.Node
- gc.Tempname(&tmp, nl.Type)
- gmove(&n1, &tmp)
- regfree(&n1)
-
- regalloc(&n2, nr.Type, nil)
- cgen(nr, &n2)
-
- regalloc(&n1, nl.Type, nil)
- cgen(&tmp, &n1)
-
- goto cmp
- }
-
- regalloc(&n1, nl.Type, nil)
- cgen(nl, &n1)
-
- // TODO(minux): cmpi does accept 16-bit signed immediate as p->to.
- // and cmpli accepts 16-bit unsigned immediate.
- //if(smallintconst(nr)) {
- // gins(optoas(OCMP, nr->type), &n1, nr);
- // patch(gbranch(optoas(a, nr->type), nr->type, likely), to);
- // regfree(&n1);
- // break;
- //}
-
- regalloc(&n2, nr.Type, nil)
-
- cgen(nr, &n2)
-
- cmp:
- l := &n1
- r := &n2
- gins(optoas(gc.OCMP, nr.Type), l, r)
- if gc.Isfloat[nr.Type.Etype] && (a == gc.OLE || a == gc.OGE) {
- // To get NaN right, must rewrite x <= y into separate x < y or x == y.
- switch a {
- case gc.OLE:
- a = gc.OLT
-
- case gc.OGE:
- a = gc.OGT
- }
-
- gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
- gc.Patch(gc.Gbranch(optoas(gc.OEQ, nr.Type), nr.Type, likely), to)
- } else {
- gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
- }
-
- regfree(&n1)
- regfree(&n2)
- }
-
- return
-}
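The float special cases in bgen exist because IEEE comparisons are not closed under negation: every ordered comparison involving NaN is false, so Brcom's branch inversion (turning x <= y into not x > y) would change the answer. A two-line demonstration:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	x, y := math.NaN(), 1.0
    	fmt.Println(x <= y)   // false: ordered comparisons with NaN fail
    	fmt.Println(!(x > y)) // true: so inverting the branch is wrong
    }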
-
-/*
- * n is on stack, either local variable
- * or return value from function call.
- * return n's offset from SP.
- */
-func stkof(n *gc.Node) int64 {
- switch n.Op {
- case gc.OINDREG:
- return n.Xoffset
-
- case gc.ODOT:
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- break
- }
- off := stkof(n.Left)
- if off == -1000 || off == 1000 {
- return off
- }
- return off + n.Xoffset
-
- case gc.OINDEX:
- t := n.Left.Type
- if !gc.Isfixedarray(t) {
- break
- }
- off := stkof(n.Left)
- if off == -1000 || off == 1000 {
- return off
- }
- if gc.Isconst(n.Right, gc.CTINT) {
- return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
- }
- return 1000
-
- case gc.OCALLMETH,
- gc.OCALLINTER,
- gc.OCALLFUNC:
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- t = t.Type
- }
-
- var flist gc.Iter
- t = gc.Structfirst(&flist, gc.Getoutarg(t))
- if t != nil {
- return t.Width + int64(gc.Widthptr) // +widthptr: correct for saved LR
- }
- }
-
- // botch - probably failing to recognize address
- // arithmetic on the above. eg INDEX and DOT
- return -1000
-}
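stkof's sentinels are easy to misread: -1000 means the operand is not stack-resident at all, +1000 means it is on the stack but at an offset that cannot be computed statically. sgen's overlap test combines both; a toy encoding with named constants (hypothetical names, the compiler uses the bare numbers):

    package main

    import "fmt"

    const (
    	offNotOnStack = -1000 // not stack-resident
    	offUnknown    = 1000  // stack-resident, offset unknown statically
    )

    // overlapRisk mirrors sgen's test: both operands on the stack with
    // at least one unknown offset forces a copy through a temporary.
    func overlapRisk(osrc, odst int64) bool {
    	return osrc != offNotOnStack && odst != offNotOnStack &&
    		(osrc == offUnknown || odst == offUnknown)
    }

    func main() {
    	fmt.Println(overlapRisk(16, offUnknown))      // true: use a temp
    	fmt.Println(overlapRisk(offNotOnStack, 1000)) // false: src not on stack
    }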
-
-/*
- * block copy:
- * memmove(&ns, &n, w);
- */
-func sgen(n *gc.Node, ns *gc.Node, w int64) {
- var res *gc.Node = ns
-
- if gc.Debug['g'] != 0 {
- fmt.Printf("\nsgen w=%d\n", w)
- gc.Dump("r", n)
- gc.Dump("res", ns)
- }
-
- if n.Ullman >= gc.UINF && ns.Ullman >= gc.UINF {
- gc.Fatal("sgen UINF")
- }
-
- if w < 0 {
- gc.Fatal("sgen copy %d", w)
- }
-
- // If copying .args, that's all the results, so record definition sites
- // for them for the liveness analysis.
- if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
- for l := gc.Curfn.Dcl; l != nil; l = l.Next {
- if l.N.Class == gc.PPARAMOUT {
- gc.Gvardef(l.N)
- }
- }
- }
-
- // Avoid taking the address for simple enough types.
- if gc.Componentgen(n, ns) {
- return
- }
-
- if w == 0 {
- // evaluate side effects only.
- var dst gc.Node
- regalloc(&dst, gc.Types[gc.Tptr], nil)
-
- agen(res, &dst)
- agen(n, &dst)
- regfree(&dst)
- return
- }
-
+func stackcopy(n, res *gc.Node, osrc, odst, w int64) {
// determine alignment.
// want to avoid unaligned access, so have to use
// smaller operations for less aligned types.
}
c := int32(w / int64(align))
- // offset on the stack
- osrc := int32(stkof(n))
-
- odst := int32(stkof(res))
- if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
- // osrc and odst both on stack, and at least one is in
- // an unknown position. Could generate code to test
- // for forward/backward copy, but instead just copy
- // to a temporary location first.
- var tmp gc.Node
- gc.Tempname(&tmp, n.Type)
-
- sgen(n, &tmp, w)
- sgen(&tmp, res, w)
- return
- }
-
- if osrc%int32(align) != 0 || odst%int32(align) != 0 {
- gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
- }
-
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
dir := align
var dst gc.Node
var src gc.Node
if n.Ullman >= res.Ullman {
- agenr(n, &dst, res) // temporarily use dst
- regalloc(&src, gc.Types[gc.Tptr], nil)
+ gc.Agenr(n, &dst, res) // temporarily use dst
+ gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
gins(ppc64.AMOVD, &dst, &src)
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
- agen(res, &dst)
+ gc.Agen(res, &dst)
} else {
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
- agenr(res, &dst, res)
- agenr(n, &src, nil)
+ gc.Agenr(res, &dst, res)
+ gc.Agenr(n, &src, nil)
}
var tmp gc.Node
- regalloc(&tmp, gc.Types[gc.Tptr], nil)
+ gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
// set up end marker
var nend gc.Node
// move src and dest to the end of block if necessary
if dir < 0 {
if c >= 4 {
- regalloc(&nend, gc.Types[gc.Tptr], nil)
+ gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
gins(ppc64.AMOVD, &src, &nend)
}
p.From.Offset = int64(-dir)
if c >= 4 {
- regalloc(&nend, gc.Types[gc.Tptr], nil)
+ gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
p := gins(ppc64.AMOVD, &src, &nend)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = w
p = gins(ppc64.ACMP, &src, &nend)
gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), ploop)
- regfree(&nend)
+ gc.Regfree(&nend)
} else {
// TODO(austin): Instead of generating ADD $-8,R8; ADD
// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
}
}
- regfree(&dst)
- regfree(&src)
- regfree(&tmp)
+ gc.Regfree(&dst)
+ gc.Regfree(&src)
+ gc.Regfree(&tmp)
}
gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = ppc64.REGSP
gc.Thearch.REGCTXT = ppc64.REGCTXT
+ gc.Thearch.REGCALLX = ppc64.REG_R3
+ gc.Thearch.REGCALLX2 = ppc64.REG_R4
+ gc.Thearch.REGRETURN = ppc64.REG_R3
+ gc.Thearch.REGMIN = ppc64.REG_R0
+ gc.Thearch.REGMAX = ppc64.REG_R31
+ gc.Thearch.FREGMIN = ppc64.REG_F0
+ gc.Thearch.FREGMAX = ppc64.REG_F31
gc.Thearch.MAXWIDTH = MAXWIDTH
- gc.Thearch.Anyregalloc = anyregalloc
+ gc.Thearch.ReservedRegs = resvd
+
gc.Thearch.Betypeinit = betypeinit
- gc.Thearch.Bgen = bgen
- gc.Thearch.Cgen = cgen
- gc.Thearch.Cgen_call = cgen_call
- gc.Thearch.Cgen_callinter = cgen_callinter
- gc.Thearch.Cgen_ret = cgen_ret
+ gc.Thearch.Cgen_hmul = cgen_hmul
+ gc.Thearch.Cgen_shift = cgen_shift
gc.Thearch.Clearfat = clearfat
gc.Thearch.Defframe = defframe
+ gc.Thearch.Dodiv = dodiv
gc.Thearch.Excise = excise
gc.Thearch.Expandchecks = expandchecks
- gc.Thearch.Gclean = gclean
- gc.Thearch.Ginit = ginit
gc.Thearch.Gins = gins
- gc.Thearch.Ginscall = ginscall
+ gc.Thearch.Ginscon = ginscon
+ gc.Thearch.Ginsnop = ginsnop
gc.Thearch.Gmove = gmove
- gc.Thearch.Igen = igen
gc.Thearch.Linkarchinit = linkarchinit
gc.Thearch.Peep = peep
gc.Thearch.Proginfo = proginfo
- gc.Thearch.Regalloc = regalloc
- gc.Thearch.Regfree = regfree
gc.Thearch.Regtyp = regtyp
gc.Thearch.Sameaddr = sameaddr
gc.Thearch.Smallindir = smallindir
gc.Thearch.Stackaddr = stackaddr
+ gc.Thearch.Stackcopy = stackcopy
+ gc.Thearch.Sudoaddable = sudoaddable
+ gc.Thearch.Sudoclean = sudoclean
gc.Thearch.Excludedregs = excludedregs
gc.Thearch.RtoB = RtoB
gc.Thearch.FtoB = RtoB
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import "cmd/internal/obj/ppc64"
-import "cmd/internal/gc"
-
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-var reg [ppc64.NREG + ppc64.NFREG]uint8
-
-var panicdiv *gc.Node
-
-/*
- * cgen.c
- */
-
-/*
- * list.c
- */
-
-/*
- * reg.c
- */
return q
}
-/*
- * generate: BL reg, f
- * where both reg and f are registers.
- * On power, f must be moved to CTR first.
- */
-func ginsBL(reg *gc.Node, f *gc.Node) {
- p := gins(ppc64.AMOVD, f, nil)
- p.To.Type = obj.TYPE_REG
- p.To.Reg = ppc64.REG_CTR
- p = gins(ppc64.ABL, reg, nil)
- p.To.Type = obj.TYPE_REG
- p.To.Reg = ppc64.REG_CTR
+func ginsnop() {
+ var reg gc.Node
+ gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
+ gins(ppc64.AOR, &reg, &reg)
}
-/*
- * generate:
- * call f
- * proc=-1 normal call but no return
- * proc=0 normal call
- * proc=1 goroutine run in new proc
- * proc=2 defer call save away stack
- * proc=3 normal call to C pointer (not Go func value)
-*/
-func ginscall(f *gc.Node, proc int) {
- if f.Type != nil {
- extra := int32(0)
- if proc == 1 || proc == 2 {
- extra = 2 * int32(gc.Widthptr)
- }
- gc.Setmaxarg(f.Type, extra)
- }
-
- switch proc {
- default:
- gc.Fatal("ginscall: bad proc %d", proc)
-
- case 0, // normal call
- -1: // normal call but no return
- if f.Op == gc.ONAME && f.Class == gc.PFUNC {
- if f == gc.Deferreturn {
- // Deferred calls will appear to be returning to
- // the CALL deferreturn(SB) that we are about to emit.
- // However, the stack trace code will show the line
- // of the instruction byte before the return PC.
- // To avoid that being an unrelated instruction,
- // insert a ppc64 NOP so that we will have the right line number.
- // The ppc64 NOP is really or r0, r0, r0; use that description
- // because the NOP pseudo-instruction would be removed by
- // the linker.
- var reg gc.Node
- gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
-
- gins(ppc64.AOR, &reg, &reg)
- }
-
- p := gins(ppc64.ABL, nil, f)
- gc.Afunclit(&p.To, f)
- if proc == -1 || gc.Noreturn(p) {
- gins(obj.AUNDEF, nil, nil)
- }
- break
- }
-
- var reg gc.Node
- gc.Nodreg(&reg, gc.Types[gc.Tptr], ppc64.REGCTXT)
- var r1 gc.Node
- gc.Nodreg(&r1, gc.Types[gc.Tptr], ppc64.REG_R3)
- gmove(f, &reg)
- reg.Op = gc.OINDREG
- gmove(&reg, &r1)
- reg.Op = gc.OREGISTER
- ginsBL(&reg, &r1)
-
- case 3: // normal call of c function pointer
- ginsBL(nil, f)
-
- case 1, // call in new proc (go)
- 2: // deferred call (defer)
- var con gc.Node
- gc.Nodconst(&con, gc.Types[gc.TINT64], int64(gc.Argsize(f.Type)))
-
- var reg gc.Node
- gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
- var reg2 gc.Node
- gc.Nodreg(&reg2, gc.Types[gc.TINT64], ppc64.REG_R4)
- gmove(f, &reg)
-
- gmove(&con, &reg2)
- p := gins(ppc64.AMOVW, &reg2, nil)
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = ppc64.REGSP
- p.To.Offset = 8
-
- p = gins(ppc64.AMOVD, &reg, nil)
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = ppc64.REGSP
- p.To.Offset = 16
-
- if proc == 1 {
- ginscall(gc.Newproc, 0)
- } else {
- if gc.Hasdefer == 0 {
- gc.Fatal("hasdefer=0 but has defer")
- }
- ginscall(gc.Deferproc, 0)
- }
-
- if proc == 2 {
- gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
- p := gins(ppc64.ACMP, &reg, nil)
- p.To.Type = obj.TYPE_REG
- p.To.Reg = ppc64.REGZERO
- p = gc.Gbranch(ppc64.ABEQ, nil, +1)
- cgen_ret(nil)
- gc.Patch(p, gc.Pc)
- }
- }
-}
-
-/*
- * n is call to interface method.
- * generate res = n.
- */
-func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
- i := n.Left
- if i.Op != gc.ODOTINTER {
- gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
- }
-
- f := i.Right // field
- if f.Op != gc.ONAME {
- gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
- }
-
- i = i.Left // interface
-
- if i.Addable == 0 {
- var tmpi gc.Node
- gc.Tempname(&tmpi, i.Type)
- cgen(i, &tmpi)
- i = &tmpi
- }
-
- gc.Genlist(n.List) // assign the args
-
- // i is now addable, prepare an indirected
- // register to hold its address.
- var nodi gc.Node
- igen(i, &nodi, res) // REG = &inter
-
- var nodsp gc.Node
- gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], ppc64.REGSP)
-
- nodsp.Xoffset = int64(gc.Widthptr)
- if proc != 0 {
- nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
- }
- nodi.Type = gc.Types[gc.Tptr]
- nodi.Xoffset += int64(gc.Widthptr)
- cgen(&nodi, &nodsp) // {8 or 24}(SP) = 8(REG) -- i.data
-
- var nodo gc.Node
- regalloc(&nodo, gc.Types[gc.Tptr], res)
-
- nodi.Type = gc.Types[gc.Tptr]
- nodi.Xoffset -= int64(gc.Widthptr)
- cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
- regfree(&nodi)
-
- var nodr gc.Node
- regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
- if n.Left.Xoffset == gc.BADWIDTH {
- gc.Fatal("cgen_callinter: badwidth")
- }
- gc.Cgen_checknil(&nodo) // in case offset is huge
- nodo.Op = gc.OINDREG
- nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
- if proc == 0 {
- // plain call: use direct c function pointer - more efficient
- cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
- proc = 3
- } else {
- // go/defer. generate go func value.
- p := gins(ppc64.AMOVD, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
- p.From.Type = obj.TYPE_ADDR
- }
-
- nodr.Type = n.Left.Type
- ginscall(&nodr, proc)
-
- regfree(&nodr)
- regfree(&nodo)
-}
-
-/*
- * generate function call;
- * proc=0 normal call
- * proc=1 goroutine run in new proc
- * proc=2 defer call save away stack
- */
-func cgen_call(n *gc.Node, proc int) {
- if n == nil {
- return
- }
-
- var afun gc.Node
- if n.Left.Ullman >= gc.UINF {
- // if name involves a fn call
- // precompute the address of the fn
- gc.Tempname(&afun, gc.Types[gc.Tptr])
-
- cgen(n.Left, &afun)
- }
-
- gc.Genlist(n.List) // assign the args
- t := n.Left.Type
-
- // call tempname pointer
- if n.Left.Ullman >= gc.UINF {
- var nod gc.Node
- regalloc(&nod, gc.Types[gc.Tptr], nil)
- gc.Cgen_as(&nod, &afun)
- nod.Type = t
- ginscall(&nod, proc)
- regfree(&nod)
- return
- }
-
- // call pointer
- if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
- var nod gc.Node
- regalloc(&nod, gc.Types[gc.Tptr], nil)
- gc.Cgen_as(&nod, n.Left)
- nod.Type = t
- ginscall(&nod, proc)
- regfree(&nod)
- return
- }
-
- // call direct
- n.Left.Method = 1
-
- ginscall(n.Left, proc)
-}
-
-/*
- * call to n has already been generated.
- * generate:
- * res = return value from call.
- */
-func cgen_callret(n *gc.Node, res *gc.Node) {
- t := n.Left.Type
- if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
- t = t.Type
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(t))
- if fp == nil {
- gc.Fatal("cgen_callret: nil")
- }
-
- var nod gc.Node
- nod.Op = gc.OINDREG
- nod.Val.U.Reg = ppc64.REGSP
- nod.Addable = 1
-
- nod.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved LR at 0(R1)
- nod.Type = fp.Type
- gc.Cgen_as(res, &nod)
-}
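cgen_callret and cgen_aret both place results at fp.Width + Widthptr from SP because slot 0 of the frame holds the saved LR on ppc64. The frame arithmetic, in isolation (offsets illustrative):

    package main

    import "fmt"

    const widthptr = 8 // pointer width on ppc64

    // retOffset mirrors the computation above: results begin just
    // past the saved link register at 0(SP).
    func retOffset(fpWidth int64) int64 {
    	return fpWidth + widthptr
    }

    func main() {
    	fmt.Println(retOffset(0)) // 8: first result lives at 8(SP)
    }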
-
-/*
- * call to n has already been generated.
- * generate:
- * res = &return value from call.
- */
-func cgen_aret(n *gc.Node, res *gc.Node) {
- t := n.Left.Type
- if gc.Isptr[t.Etype] {
- t = t.Type
- }
-
- var flist gc.Iter
- fp := gc.Structfirst(&flist, gc.Getoutarg(t))
- if fp == nil {
- gc.Fatal("cgen_aret: nil")
- }
-
- var nod1 gc.Node
- nod1.Op = gc.OINDREG
- nod1.Val.U.Reg = ppc64.REGSP
- nod1.Addable = 1
-
- nod1.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
- nod1.Type = fp.Type
-
- if res.Op != gc.OREGISTER {
- var nod2 gc.Node
- regalloc(&nod2, gc.Types[gc.Tptr], res)
- agen(&nod1, &nod2)
- gins(ppc64.AMOVD, &nod2, res)
- regfree(&nod2)
- } else {
- agen(&nod1, res)
- }
-}
-
-/*
- * generate return.
- * n->left is assignments to return values.
- */
-func cgen_ret(n *gc.Node) {
- if n != nil {
- gc.Genlist(n.List) // copy out args
- }
- if gc.Hasdefer != 0 {
- ginscall(gc.Deferreturn, 0)
- }
- gc.Genlist(gc.Curfn.Exit)
- p := gins(obj.ARET, nil, nil)
- if n != nil && n.Op == gc.ORETJMP {
- p.To.Name = obj.NAME_EXTERN
- p.To.Type = obj.TYPE_ADDR
- p.To.Sym = gc.Linksym(n.Left.Sym)
- }
-}
+var panicdiv *gc.Node
/*
* generate division.
a := optoas(gc.ODIV, t)
var tl gc.Node
- regalloc(&tl, t0, nil)
+ gc.Regalloc(&tl, t0, nil)
var tr gc.Node
- regalloc(&tr, t0, nil)
+ gc.Regalloc(&tr, t0, nil)
if nl.Ullman >= nr.Ullman {
- cgen(nl, &tl)
- cgen(nr, &tr)
+ gc.Cgen(nl, &tl)
+ gc.Cgen(nr, &tr)
} else {
- cgen(nr, &tr)
- cgen(nl, &tl)
+ gc.Cgen(nr, &tr)
+ gc.Cgen(nl, &tl)
}
if t != t0 {
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
- ginscall(panicdiv, -1)
+ gc.Ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
var p2 *obj.Prog
p1 = gins(a, &tr, &tl)
if op == gc.ODIV {
- regfree(&tr)
+ gc.Regfree(&tr)
gmove(&tl, res)
} else {
// A%B = A-(A/B*B)
var tm gc.Node
- regalloc(&tm, t, nil)
+ gc.Regalloc(&tm, t, nil)
// patch div to use the 3 register form
// TODO(minux): add gins3?
p1.To.Reg = tm.Val.U.Reg
gins(optoas(gc.OMUL, t), &tr, &tm)
- regfree(&tr)
+ gc.Regfree(&tr)
gins(optoas(gc.OSUB, t), &tm, &tl)
- regfree(&tm)
+ gc.Regfree(&tm)
gmove(&tl, res)
}
- regfree(&tl)
+ gc.Regfree(&tl)
if check != 0 {
gc.Patch(p2, gc.Pc)
}
}
-/*
- * generate division according to op, one of:
- * res = nl / nr
- * res = nl % nr
- */
-func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- // TODO(minux): enable division by magic multiply (also need to fix longmod below)
- //if(nr->op != OLITERAL)
- // division and mod using (slow) hardware instruction
- dodiv(op, nl, nr, res)
-
- return
-}
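The TODO about "division by magic multiply" refers to strength-reducing division by a constant into a multiply plus shift. A minimal sketch for unsigned 32-bit division by 3 using the well-known constant ceil(2^33/3) = 0xAAAAAAAB (an illustration of the technique, not what this backend would emit):

    package main

    import "fmt"

    // div3 computes x/3 without a divide instruction: the 64-bit
    // product with ceil(2^33/3), shifted right by 33, equals x/3
    // for every uint32 x.
    func div3(x uint32) uint32 {
    	return uint32((uint64(x) * 0xAAAAAAAB) >> 33)
    }

    func main() {
    	for _, x := range []uint32{0, 1, 2, 3, 100, 1<<32 - 1} {
    		if div3(x) != x/3 {
    			panic("mismatch")
    		}
    	}
    	fmt.Println("magic-multiply division agrees with x/3")
    }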
-
/*
* generate high multiply:
* res = (nl*nr) >> width
t := nl.Type
w := int(t.Width * 8)
var n1 gc.Node
- cgenr(nl, &n1, res)
+ gc.Cgenr(nl, &n1, res)
var n2 gc.Node
- cgenr(nr, &n2, nil)
+ gc.Cgenr(nr, &n2, nil)
switch gc.Simtype[t.Etype] {
case gc.TINT8,
gc.TINT16,
gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
}
- cgen(&n1, res)
- regfree(&n1)
- regfree(&n2)
+ gc.Cgen(&n1, res)
+ gc.Regfree(&n1)
+ gc.Regfree(&n2)
}
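OHMUL is the high-word multiply that magic-number division leans on: it keeps only the top half of the double-width product. Equivalent Go arithmetic for the signed 32-bit case:

    package main

    import "fmt"

    // hmul32 returns the high 32 bits of the 64-bit product,
    // which is what cgen_hmul computes for 32-bit operands.
    func hmul32(x, y int32) int32 {
    	return int32((int64(x) * int64(y)) >> 32)
    }

    func main() {
    	fmt.Println(hmul32(1<<30, 4)) // 1: the product is exactly 2^32
    	fmt.Println(hmul32(-1, 1))    // -1: the high word is sign-extended
    }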
/*
if nr.Op == gc.OLITERAL {
var n1 gc.Node
- regalloc(&n1, nl.Type, res)
- cgen(nl, &n1)
+ gc.Regalloc(&n1, nl.Type, res)
+ gc.Cgen(nl, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1
gins(a, nr, &n1)
}
gmove(&n1, res)
- regfree(&n1)
+ gc.Regfree(&n1)
return
}
if nl.Ullman >= gc.UINF {
var n4 gc.Node
gc.Tempname(&n4, nl.Type)
- cgen(nl, &n4)
+ gc.Cgen(nl, &n4)
nl = &n4
}
if nr.Ullman >= gc.UINF {
var n5 gc.Node
gc.Tempname(&n5, nr.Type)
- cgen(nr, &n5)
+ gc.Cgen(nr, &n5)
nr = &n5
}
}
var n1 gc.Node
- regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
+ gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
var n3 gc.Node
- regalloc(&n3, tcount, &n1) // to clear high bits of CX
+ gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
var n2 gc.Node
- regalloc(&n2, nl.Type, res)
+ gc.Regalloc(&n2, nl.Type, res)
if nl.Ullman >= nr.Ullman {
- cgen(nl, &n2)
- cgen(nr, &n1)
+ gc.Cgen(nl, &n2)
+ gc.Cgen(nr, &n1)
gmove(&n1, &n3)
} else {
- cgen(nr, &n1)
+ gc.Cgen(nr, &n1)
gmove(&n1, &n3)
- cgen(nl, &n2)
+ gc.Cgen(nl, &n2)
}
- regfree(&n3)
+ gc.Regfree(&n3)
// test and fix up large shifts
if !bounded {
gmove(&n2, res)
- regfree(&n1)
- regfree(&n2)
+ gc.Regfree(&n1)
+ gc.Regfree(&n2)
}
func clearfat(nl *gc.Node) {
c := uint64(w % 8) // bytes
q := uint64(w / 8) // dwords
- if reg[ppc64.REGRT1-ppc64.REG_R0] > 0 {
- gc.Fatal("R%d in use during clearfat", ppc64.REGRT1-ppc64.REG_R0)
+ if gc.Reginuse(ppc64.REGRT1) {
+ gc.Fatal("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
}
var r0 gc.Node
gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
var dst gc.Node
gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
- reg[ppc64.REGRT1-ppc64.REG_R0]++
- agen(nl, &dst)
+ gc.Regrealloc(&dst)
+ gc.Agen(nl, &dst)
var boff uint64
if q > 128 {
p.From.Offset = 8
var end gc.Node
- regalloc(&end, gc.Types[gc.Tptr], nil)
+ gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
p = gins(ppc64.AMOVD, &dst, &end)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(q * 8)
p = gins(ppc64.ACMP, &dst, &end)
gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
- regfree(&end)
+ gc.Regfree(&end)
// The loop leaves R3 on the last zeroed dword
boff = 8
p.To.Offset = int64(t + boff)
}
- reg[ppc64.REGRT1-ppc64.REG_R0]--
+ gc.Regfree(&dst)
}
// Called after regopt and peep have run.
ppc64.FREGTWO,
}
-func ginit() {
- for i := 0; i < len(reg); i++ {
- reg[i] = 1
- }
- for i := 0; i < ppc64.NREG+ppc64.NFREG; i++ {
- reg[i] = 0
- }
-
- for i := 0; i < len(resvd); i++ {
- reg[resvd[i]-ppc64.REG_R0]++
- }
-}
-
-var regpc [len(reg)]uint32
-
-func gclean() {
- for i := int(0); i < len(resvd); i++ {
- reg[resvd[i]-ppc64.REG_R0]--
- }
-
- for i := int(0); i < len(reg); i++ {
- if reg[i] != 0 {
- gc.Yyerror("reg %v left allocated, %p\n", obj.Rconv(i+ppc64.REG_R0), regpc[i])
- }
- }
-}
-
-func anyregalloc() bool {
- var j int
-
- for i := int(0); i < len(reg); i++ {
- if reg[i] == 0 {
- goto ok
- }
- for j = 0; j < len(resvd); j++ {
- if resvd[j] == i {
- goto ok
- }
- }
- return true
- ok:
- }
-
- return false
-}
-
-/*
- * allocate register of type t, leave in n.
- * if o != N, o is desired fixed register.
- * caller must regfree(n).
- */
-func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
- if t == nil {
- gc.Fatal("regalloc: t nil")
- }
- et := int(int(gc.Simtype[t.Etype]))
-
- if gc.Debug['r'] != 0 {
- fixfree := int(0)
- fltfree := int(0)
- for i := int(ppc64.REG_R0); i < ppc64.REG_F31; i++ {
- if reg[i-ppc64.REG_R0] == 0 {
- if i < ppc64.REG_F0 {
- fixfree++
- } else {
- fltfree++
- }
- }
- }
-
- fmt.Printf("regalloc fix %d flt %d free\n", fixfree, fltfree)
- }
-
- var i int
- switch et {
- case gc.TINT8,
- gc.TUINT8,
- gc.TINT16,
- gc.TUINT16,
- gc.TINT32,
- gc.TUINT32,
- gc.TINT64,
- gc.TUINT64,
- gc.TPTR32,
- gc.TPTR64,
- gc.TBOOL:
- if o != nil && o.Op == gc.OREGISTER {
- i = int(o.Val.U.Reg)
- if i >= ppc64.REGMIN && i <= ppc64.REGMAX {
- goto out
- }
- }
-
- for i = ppc64.REGMIN; i <= ppc64.REGMAX; i++ {
- if reg[i-ppc64.REG_R0] == 0 {
- regpc[i-ppc64.REG_R0] = uint32(obj.Getcallerpc(&n))
- goto out
- }
- }
-
- gc.Flusherrors()
- for i := int(ppc64.REG_R0); i < ppc64.REG_R0+ppc64.NREG; i++ {
- fmt.Printf("R%d %p\n", i, regpc[i-ppc64.REG_R0])
- }
- gc.Fatal("out of fixed registers")
-
- case gc.TFLOAT32,
- gc.TFLOAT64:
- if o != nil && o.Op == gc.OREGISTER {
- i = int(o.Val.U.Reg)
- if i >= ppc64.FREGMIN && i <= ppc64.FREGMAX {
- goto out
- }
- }
-
- for i = ppc64.FREGMIN; i <= ppc64.FREGMAX; i++ {
- if reg[i-ppc64.REG_R0] == 0 {
- regpc[i-ppc64.REG_R0] = uint32(obj.Getcallerpc(&n))
- goto out
- }
- }
-
- gc.Flusherrors()
- for i := int(ppc64.REG_F0); i < ppc64.REG_F0+ppc64.NREG; i++ {
- fmt.Printf("F%d %p\n", i, regpc[i-ppc64.REG_R0])
- }
- gc.Fatal("out of floating registers")
-
- case gc.TCOMPLEX64,
- gc.TCOMPLEX128:
- gc.Tempname(n, t)
- return
- }
-
- gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0))
- return
-
-out:
- reg[i-ppc64.REG_R0]++
- gc.Nodreg(n, t, i)
-}
-
-func regfree(n *gc.Node) {
- if n.Op == gc.ONAME {
- return
- }
- if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
- gc.Fatal("regfree: not a register")
- }
- i := int(int(n.Val.U.Reg) - ppc64.REG_R0)
- if i == ppc64.REGSP-ppc64.REG_R0 {
- return
- }
- if i < 0 || i >= len(reg) {
- gc.Fatal("regfree: reg out of range")
- }
- if reg[i] <= 0 {
- gc.Fatal("regfree: reg not allocated")
- }
- reg[i]--
- if reg[i] == 0 {
- regpc[i] = 0
- }
-}
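The deleted regalloc/regfree pair is a reference-counted register pool: reg[i] counts outstanding uses, reserved registers are pre-incremented so they are never handed out, and freeing an unallocated register is fatal. A toy model of the scheme (reservations and fixed-register requests omitted):

    package main

    import "fmt"

    const nreg = 32

    var use [nreg]int // use[i] > 0 means register i is taken

    func alloc() int {
    	for i := range use {
    		if use[i] == 0 {
    			use[i]++
    			return i
    		}
    	}
    	panic("out of registers")
    }

    func free(i int) {
    	if use[i] <= 0 {
    		panic("regfree: reg not allocated")
    	}
    	use[i]--
    }

    func main() {
    	r := alloc()
    	fmt.Println("allocated R", r)
    	free(r)
    }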
-
/*
* generate
* as $c, n
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
- if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) {
+ if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) || n2.Op != gc.OREGISTER || as == ppc64.AMULLD {
// cannot have more than 16-bit of immediate in ADD, etc.
// instead, MOV into register first.
var ntmp gc.Node
- regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+ gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
- gins(ppc64.AMOVD, &n1, &ntmp)
- gins(as, &ntmp, n2)
- regfree(&ntmp)
+ rawgins(ppc64.AMOVD, &n1, &ntmp)
+ rawgins(as, &ntmp, n2)
+ gc.Regfree(&ntmp)
return
}
- gins(as, &n1, n2)
+ rawgins(as, &n1, n2)
}
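ginscon's detour through a scratch register follows from the ppc64 encoding: D-form arithmetic like ADDI carries only a 16-bit signed immediate, so larger constants must be materialized with MOVD first. A sketch of the range test (assuming ppc64.BIG marks the signed 16-bit boundary, which is a simplification):

    package main

    import "fmt"

    const big = 1<<15 - 1 // assumed 16-bit signed boundary

    func fitsImm16(c int64) bool { return -big <= c && c <= big }

    func main() {
    	fmt.Println(fitsImm16(32767))   // true: encodable directly
    	fmt.Println(fitsImm16(1 << 20)) // false: MOVD into a register first
    }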
/*
case ppc64.ACMP:
if -ppc64.BIG <= c && c <= ppc64.BIG {
- gins(as, n2, &n1)
+ rawgins(as, n2, &n1)
return
}
case ppc64.ACMPU:
if 0 <= c && c <= 2*ppc64.BIG {
- gins(as, n2, &n1)
+ rawgins(as, n2, &n1)
return
}
}
// MOV n1 into register first
var ntmp gc.Node
- regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+ gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
- gins(ppc64.AMOVD, &n1, &ntmp)
- gins(as, n2, &ntmp)
- regfree(&ntmp)
+ rawgins(ppc64.AMOVD, &n1, &ntmp)
+ rawgins(as, n2, &ntmp)
+ gc.Regfree(&ntmp)
}
/*
var con gc.Node
gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val)
var r1 gc.Node
- regalloc(&r1, con.Type, t)
+ gc.Regalloc(&r1, con.Type, t)
gins(ppc64.AMOVD, &con, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
case gc.TUINT32,
var con gc.Node
gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val)
var r1 gc.Node
- regalloc(&r1, con.Type, t)
+ gc.Regalloc(&r1, con.Type, t)
gins(ppc64.AMOVD, &con, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
}
bignodes()
var r1 gc.Node
- regalloc(&r1, gc.Types[ft], f)
+ gc.Regalloc(&r1, gc.Types[ft], f)
gmove(f, &r1)
if tt == gc.TUINT64 {
- regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+ gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
gmove(&bigf, &r2)
gins(ppc64.AFCMPU, &r1, &r2)
p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)
gins(ppc64.AFSUB, &r2, &r1)
gc.Patch(p1, gc.Pc)
- regfree(&r2)
+ gc.Regfree(&r2)
}
- regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+ gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
var r3 gc.Node
- regalloc(&r3, gc.Types[gc.TINT64], t)
+ gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
gins(ppc64.AFCTIDZ, &r1, &r2)
p1 := gins(ppc64.AFMOVD, &r2, nil)
p1.To.Type = obj.TYPE_MEM
p1.From.Type = obj.TYPE_MEM
p1.From.Reg = ppc64.REGSP
p1.From.Offset = -8
- regfree(&r2)
- regfree(&r1)
+ gc.Regfree(&r2)
+ gc.Regfree(&r1)
if tt == gc.TUINT64 {
p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1) // use CR0 here again
gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
}
gmove(&r3, t)
- regfree(&r3)
+ gc.Regfree(&r3)
return
//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
bignodes()
var r1 gc.Node
- regalloc(&r1, gc.Types[gc.TINT64], nil)
+ gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
gmove(f, &r1)
if ft == gc.TUINT64 {
gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
gc.Patch(p1, gc.Pc)
}
- regalloc(&r2, gc.Types[gc.TFLOAT64], t)
+ gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
p1 := gins(ppc64.AMOVD, &r1, nil)
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = ppc64.REGSP
p1.From.Reg = ppc64.REGSP
p1.From.Offset = -8
gins(ppc64.AFCFID, &r2, &r2)
- regfree(&r1)
+ gc.Regfree(&r1)
if ft == gc.TUINT64 {
p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) // use CR0 here again
gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
}
gmove(&r2, t)
- regfree(&r2)
+ gc.Regfree(&r2)
return
/*
// requires register destination
rdst:
{
- regalloc(&r1, t.Type, t)
+ gc.Regalloc(&r1, t.Type, t)
gins(a, f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
return
}
// requires register intermediate
hard:
- regalloc(&r1, cvt, t)
+ gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
- regfree(&r1)
+ gc.Regfree(&r1)
+ return
+}
+
+func intLiteral(n *gc.Node) (x int64, ok bool) {
+ if n == nil || n.Op != gc.OLITERAL {
+ return
+ }
+ switch n.Val.Ctype {
+ case gc.CTINT, gc.CTRUNE:
+ return gc.Mpgetfix(n.Val.U.Xval), true
+ case gc.CTBOOL:
+ return int64(n.Val.U.Bval), true
+ }
return
}
+// gins is called by the front end.
+// It synthesizes some multiple-instruction sequences
+// so the front end can stay simpler.
+func gins(as int, f, t *gc.Node) *obj.Prog {
+ if as >= obj.A_ARCHSPECIFIC {
+ if x, ok := intLiteral(f); ok {
+ ginscon(as, x, t)
+ return nil // caller must not use
+ }
+ }
+ if as == ppc64.ACMP || as == ppc64.ACMPU {
+ if x, ok := intLiteral(t); ok {
+ ginscon2(as, f, x)
+ return nil // caller must not use
+ }
+ }
+ return rawgins(as, f, t)
+}
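The gins wrapper lets the portable front end emit naive instructions and leaves legalization to the back end. A toy model of that dispatch (names hypothetical; the real code routes to ginscon and ginscon2 as shown above):

    package main

    import "fmt"

    // emit stands in for rawgins, which rejects immediates the
    // machine cannot encode (compare the AND/AMULLD checks below).
    func emit(op, src, dst string) string {
    	if op == "AND" && src[0] == '$' {
    		panic("bad inst: AND with immediate")
    	}
    	return op + " " + src + ", " + dst
    }

    // ginsLike stands in for gins: constant sources are routed
    // through a scratch register instead of straight to emit.
    func ginsLike(op, src, dst string) []string {
    	if op == "AND" && src[0] == '$' {
    		return []string{
    			emit("MOVD", src, "Rtmp"),
    			emit("AND", "Rtmp", dst),
    		}
    	}
    	return []string{emit(op, src, dst)}
    }

    func main() {
    	fmt.Println(ginsLike("AND", "$1234567", "R3"))
    }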
+
/*
* generate one instruction:
* as f, t
*/
-func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
// TODO(austin): Add self-move test like in 6g (but be careful
// of truncation moves)
gc.Naddr(&p.From, f)
gc.Naddr(&p.To, t)
+ switch as {
+ case obj.ACALL:
+ if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
+ // Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
+ pp := gc.Prog(as)
+ pp.From = p.From
+ pp.To.Type = obj.TYPE_REG
+ pp.To.Reg = ppc64.REG_CTR
+
+ p.As = ppc64.AMOVD
+ p.From = p.To
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v\n", p)
+ fmt.Printf("%v\n", pp)
+ }
+
+ return pp
+ }
+
+ // Bad things the front end has done to us. Crash to find call stack.
+ case ppc64.AAND, ppc64.AMULLD:
+ if p.From.Type == obj.TYPE_CONST {
+ gc.Debug['h'] = 1
+ gc.Fatal("bad inst: %v", p)
+ }
+ case ppc64.ACMP, ppc64.ACMPU:
+ if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
+ gc.Debug['h'] = 1
+ gc.Fatal("bad inst: %v", p)
+ }
+ }
+
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
}
// ACMPU
gc.OLE<<16 | gc.TUINT16,
gc.OLE<<16 | gc.TUINT32,
- gc.OLE<<16 | gc.TUINT64,
- gc.OLE<<16 | gc.TFLOAT32,
- // AFCMPU
- gc.OLE<<16 | gc.TFLOAT64:
+ gc.OLE<<16 | gc.TUINT64:
+ // No OLE for floats, because it mishandles NaN.
+ // Front end must reverse comparison or use OLT and OEQ together.
a = ppc64.ABLE
case gc.OGT<<16 | gc.TINT8,
gc.OGE<<16 | gc.TUINT8,
gc.OGE<<16 | gc.TUINT16,
gc.OGE<<16 | gc.TUINT32,
- gc.OGE<<16 | gc.TUINT64,
- gc.OGE<<16 | gc.TFLOAT32,
- gc.OGE<<16 | gc.TFLOAT64:
+ gc.OGE<<16 | gc.TUINT64:
+ // No OGE for floats, because it mishandles NaN.
+ // Front end must reverse comparison or use OLT and OEQ together.
a = ppc64.ABGE
case gc.OCMP<<16 | gc.TBOOL,
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+/*
+ * generate:
+ * res = n;
+ * simplifies and calls Thearch.Gmove.
+ */
+func Cgen(n *Node, res *Node) {
+ if Debug['g'] != 0 {
+ Dump("\ncgen-n", n)
+ Dump("cgen-res", res)
+ }
+
+ if n == nil || n.Type == nil {
+ return
+ }
+
+ if res == nil || res.Type == nil {
+ Fatal("cgen: res nil")
+ }
+
+ for n.Op == OCONVNOP {
+ n = n.Left
+ }
+
+ switch n.Op {
+ case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+ if res.Op != ONAME || res.Addable == 0 {
+ var n1 Node
+ Tempname(&n1, n.Type)
+ Cgen_slice(n, &n1)
+ Cgen(&n1, res)
+ } else {
+ Cgen_slice(n, res)
+ }
+ return
+
+ case OEFACE:
+ if res.Op != ONAME || res.Addable == 0 {
+ var n1 Node
+ Tempname(&n1, n.Type)
+ Cgen_eface(n, &n1)
+ Cgen(&n1, res)
+ } else {
+ Cgen_eface(n, res)
+ }
+ return
+ }
+
+ if n.Ullman >= UINF {
+ if n.Op == OINDREG {
+ Fatal("cgen: this is going to miscompile")
+ }
+ if res.Ullman >= UINF {
+ var n1 Node
+ Tempname(&n1, n.Type)
+ Cgen(n, &n1)
+ Cgen(&n1, res)
+ return
+ }
+ }
+
+ if Isfat(n.Type) {
+ if n.Type.Width < 0 {
+ Fatal("forgot to compute width for %v", Tconv(n.Type, 0))
+ }
+ sgen(n, res, n.Type.Width)
+ return
+ }
+
+ if res.Addable == 0 {
+ if n.Ullman > res.Ullman {
+ if Ctxt.Arch.Regsize == 4 && Is64(n.Type) {
+ var n1 Node
+ Tempname(&n1, n.Type)
+ Cgen(n, &n1)
+ Cgen(&n1, res)
+ return
+ }
+
+ var n1 Node
+ Regalloc(&n1, n.Type, res)
+ Cgen(n, &n1)
+ if n1.Ullman > res.Ullman {
+ Dump("n1", &n1)
+ Dump("res", res)
+ Fatal("loop in cgen")
+ }
+
+ Cgen(&n1, res)
+ Regfree(&n1)
+ return
+ }
+
+ var f int
+ if res.Ullman >= UINF {
+ goto gen
+ }
+
+ if Complexop(n, res) {
+ Complexgen(n, res)
+ return
+ }
+
+ f = 1 // gen thru register
+ switch n.Op {
+ case OLITERAL:
+ if Smallintconst(n) {
+ f = 0
+ }
+
+ case OREGISTER:
+ f = 0
+ }
+
+ if !Iscomplex[n.Type.Etype] && Ctxt.Arch.Regsize == 8 {
+ a := Thearch.Optoas(OAS, res.Type)
+ var addr obj.Addr
+ if Thearch.Sudoaddable(a, res, &addr) {
+ var p1 *obj.Prog
+ if f != 0 {
+ var n2 Node
+ Regalloc(&n2, res.Type, nil)
+ Cgen(n, &n2)
+ p1 = Thearch.Gins(a, &n2, nil)
+ Regfree(&n2)
+ } else {
+ p1 = Thearch.Gins(a, n, nil)
+ }
+ p1.To = addr
+ if Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ Thearch.Sudoclean()
+ return
+ }
+ }
+
+ gen:
+ if Ctxt.Arch.Thechar == '8' {
+ // no registers to speak of
+ var n1, n2 Node
+ Tempname(&n1, n.Type)
+ Cgen(n, &n1)
+ Igen(res, &n2, nil)
+ Thearch.Gmove(&n1, &n2)
+ Regfree(&n2)
+ return
+ }
+
+ var n1 Node
+ Igen(res, &n1, nil)
+ Cgen(n, &n1)
+ Regfree(&n1)
+ return
+ }
+
+ // update addressability for string, slice
+ // can't do in walk because n->left->addable
+ // changes if n->left is an escaping local variable.
+ switch n.Op {
+ case OSPTR,
+ OLEN:
+ if Isslice(n.Left.Type) || Istype(n.Left.Type, TSTRING) {
+ n.Addable = n.Left.Addable
+ }
+
+ case OCAP:
+ if Isslice(n.Left.Type) {
+ n.Addable = n.Left.Addable
+ }
+
+ case OITAB:
+ n.Addable = n.Left.Addable
+ }
+
+ if Ctxt.Arch.Thechar == '5' { // TODO(rsc): Maybe more often?
+ // if both are addressable, move
+ if n.Addable != 0 && res.Addable != 0 {
+ if Is64(n.Type) || Is64(res.Type) || n.Op == OREGISTER || res.Op == OREGISTER || Iscomplex[n.Type.Etype] || Iscomplex[res.Type.Etype] {
+ Thearch.Gmove(n, res)
+ } else {
+ var n1 Node
+ Regalloc(&n1, n.Type, nil)
+ Thearch.Gmove(n, &n1)
+ Cgen(&n1, res)
+ Regfree(&n1)
+ }
+
+ return
+ }
+
+ // if both are not addressable, use a temporary.
+ if n.Addable == 0 && res.Addable == 0 {
+ // could use regalloc here sometimes,
+ // but have to check for ullman >= UINF.
+ var n1 Node
+ Tempname(&n1, n.Type)
+ Cgen(n, &n1)
+ Cgen(&n1, res)
+ return
+ }
+
+ // if result is not addressable directly but n is,
+ // compute its address and then store via the address.
+ if res.Addable == 0 {
+ var n1 Node
+ Igen(res, &n1, nil)
+ Cgen(n, &n1)
+ Regfree(&n1)
+ return
+ }
+ }
+
+ if Complexop(n, res) {
+ Complexgen(n, res)
+ return
+ }
+
+ if (Ctxt.Arch.Thechar == '6' || Ctxt.Arch.Thechar == '8') && n.Addable != 0 {
+ Thearch.Gmove(n, res)
+ return
+ }
+
+ if Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+ // if both are addressable, move
+ if n.Addable != 0 {
+ if n.Op == OREGISTER || res.Op == OREGISTER {
+ Thearch.Gmove(n, res)
+ } else {
+ var n1 Node
+ Regalloc(&n1, n.Type, nil)
+ Thearch.Gmove(n, &n1)
+ Cgen(&n1, res)
+ Regfree(&n1)
+ }
+ return
+ }
+ }
+
+ // if n is sudoaddable generate addr and move
+ if Ctxt.Arch.Thechar == '5' && !Is64(n.Type) && !Is64(res.Type) && !Iscomplex[n.Type.Etype] && !Iscomplex[res.Type.Etype] {
+ a := Thearch.Optoas(OAS, n.Type)
+ var addr obj.Addr
+ if Thearch.Sudoaddable(a, n, &addr) {
+ if res.Op != OREGISTER {
+ var n2 Node
+ Regalloc(&n2, res.Type, nil)
+ p1 := Thearch.Gins(a, nil, &n2)
+ p1.From = addr
+ if Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ Thearch.Gmove(&n2, res)
+ Regfree(&n2)
+ } else {
+ p1 := Thearch.Gins(a, nil, res)
+ p1.From = addr
+ if Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ }
+ Thearch.Sudoclean()
+ return
+ }
+ }
+
+ nl := n.Left
+ nr := n.Right
+
+ if nl != nil && nl.Ullman >= UINF {
+ if nr != nil && nr.Ullman >= UINF {
+ var n1 Node
+ Tempname(&n1, nl.Type)
+ Cgen(nl, &n1)
+ n2 := *n
+ n2.Left = &n1
+ Cgen(&n2, res)
+ return
+ }
+ }
+
+ // 64-bit ops are hard on 32-bit machine.
+ if Ctxt.Arch.Regsize == 4 && (Is64(n.Type) || Is64(res.Type) || n.Left != nil && Is64(n.Left.Type)) {
+ switch n.Op {
+ // math goes to cgen64.
+ case OMINUS,
+ OCOM,
+ OADD,
+ OSUB,
+ OMUL,
+ OLROT,
+ OLSH,
+ ORSH,
+ OAND,
+ OOR,
+ OXOR:
+ Thearch.Cgen64(n, res)
+ return
+ }
+ }
+
+ if Thearch.Cgen_float != nil && nl != nil && Isfloat[n.Type.Etype] && Isfloat[nl.Type.Etype] {
+ Thearch.Cgen_float(n, res)
+ return
+ }
+
+ if !Iscomplex[n.Type.Etype] && Ctxt.Arch.Regsize == 8 {
+ a := Thearch.Optoas(OAS, n.Type)
+ var addr obj.Addr
+ if Thearch.Sudoaddable(a, n, &addr) {
+ if res.Op == OREGISTER {
+ p1 := Thearch.Gins(a, nil, res)
+ p1.From = addr
+ } else {
+ var n2 Node
+ Regalloc(&n2, n.Type, nil)
+ p1 := Thearch.Gins(a, nil, &n2)
+ p1.From = addr
+ Thearch.Gins(a, &n2, res)
+ Regfree(&n2)
+ }
+
+ Thearch.Sudoclean()
+ return
+ }
+ }
+
+ var a int
+ switch n.Op {
+ default:
+ Dump("cgen", n)
+ Dump("cgen-res", res)
+ Fatal("cgen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ // these call bgen to get a bool value
+ case OOROR,
+ OANDAND,
+ OEQ,
+ ONE,
+ OLT,
+ OLE,
+ OGE,
+ OGT,
+ ONOT:
+ p1 := Gbranch(obj.AJMP, nil, 0)
+
+ p2 := Pc
+ Thearch.Gmove(Nodbool(true), res)
+ p3 := Gbranch(obj.AJMP, nil, 0)
+ Patch(p1, Pc)
+ Bgen(n, true, 0, p2)
+ Thearch.Gmove(Nodbool(false), res)
+ Patch(p3, Pc)
+ return
+
+ case OPLUS:
+ Cgen(nl, res)
+ return
+
+ // unary
+ case OCOM:
+ a := Thearch.Optoas(OXOR, nl.Type)
+
+ var n1 Node
+ Regalloc(&n1, nl.Type, nil)
+ Cgen(nl, &n1)
+ var n2 Node
+ Nodconst(&n2, nl.Type, -1)
+ Thearch.Gins(a, &n2, &n1)
+ cgen_norm(n, &n1, res)
+ return
+
+ case OMINUS:
+ if Isfloat[nl.Type.Etype] {
+ nr = Nodintconst(-1)
+ Convlit(&nr, n.Type)
+ a = Thearch.Optoas(OMUL, nl.Type)
+ goto sbop
+ }
+
+ a := Thearch.Optoas(int(n.Op), nl.Type)
+ // unary
+ var n1 Node
+ Regalloc(&n1, nl.Type, res)
+
+ Cgen(nl, &n1)
+ if Ctxt.Arch.Thechar == '5' {
+ var n2 Node
+ Nodconst(&n2, nl.Type, 0)
+ Thearch.Gins(a, &n2, &n1)
+ } else if Ctxt.Arch.Thechar == '7' {
+ Thearch.Gins(a, &n1, &n1)
+ } else {
+ Thearch.Gins(a, nil, &n1)
+ }
+ cgen_norm(n, &n1, res)
+ return
+
+ // symmetric binary
+ case OAND,
+ OOR,
+ OXOR,
+ OADD,
+ OMUL:
+ if n.Op == OMUL && Thearch.Cgen_bmul != nil && Thearch.Cgen_bmul(int(n.Op), nl, nr, res) {
+ break
+ }
+ a = Thearch.Optoas(int(n.Op), nl.Type)
+ goto sbop
+
+ // asymmetric binary
+ case OSUB:
+ a = Thearch.Optoas(int(n.Op), nl.Type)
+ goto abop
+
+ case OHMUL:
+ Thearch.Cgen_hmul(nl, nr, res)
+
+ case OCONV:
+ if Eqtype(n.Type, nl.Type) || Noconv(n.Type, nl.Type) {
+ Cgen(nl, res)
+ return
+ }
+
+ if Ctxt.Arch.Thechar == '8' {
+ var n1 Node
+ var n2 Node
+ Tempname(&n2, n.Type)
+ Mgen(nl, &n1, res)
+ Thearch.Gmove(&n1, &n2)
+ Thearch.Gmove(&n2, res)
+ Mfree(&n1)
+ break
+ }
+
+ var n1 Node
+ var n2 Node
+ if Ctxt.Arch.Thechar == '5' {
+ if nl.Addable != 0 && !Is64(nl.Type) {
+ Regalloc(&n1, nl.Type, res)
+ Thearch.Gmove(nl, &n1)
+ } else {
+ if n.Type.Width > int64(Widthptr) || Is64(nl.Type) || Isfloat[nl.Type.Etype] {
+ Tempname(&n1, nl.Type)
+ } else {
+ Regalloc(&n1, nl.Type, res)
+ }
+ Cgen(nl, &n1)
+ }
+ if n.Type.Width > int64(Widthptr) || Is64(n.Type) || Isfloat[n.Type.Etype] {
+ Tempname(&n2, n.Type)
+ } else {
+ Regalloc(&n2, n.Type, nil)
+ }
+ } else {
+ if n.Type.Width > nl.Type.Width {
+ // If loading from memory, do conversion during load,
+ // so as to avoid use of 8-bit register in, say, int(*byteptr).
+ switch nl.Op {
+ case ODOT, ODOTPTR, OINDEX, OIND, ONAME:
+ Igen(nl, &n1, res)
+ Regalloc(&n2, n.Type, res)
+ Thearch.Gmove(&n1, &n2)
+ Thearch.Gmove(&n2, res)
+ Regfree(&n2)
+ Regfree(&n1)
+ return
+ }
+ }
+ Regalloc(&n1, nl.Type, res)
+ Regalloc(&n2, n.Type, &n1)
+ Cgen(nl, &n1)
+ }
+
+ // if we do the conversion n1 -> n2 here
+ // reusing the register, then gmove won't
+ // have to allocate its own register.
+ Thearch.Gmove(&n1, &n2)
+ Thearch.Gmove(&n2, res)
+ if n2.Op == OREGISTER {
+ Regfree(&n2)
+ }
+ if n1.Op == OREGISTER {
+ Regfree(&n1)
+ }
+
+ case ODOT,
+ ODOTPTR,
+ OINDEX,
+ OIND,
+ ONAME: // PHEAP or PPARAMREF var
+ var n1 Node
+ Igen(n, &n1, res)
+
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+
+ // interface table is first word of interface value
+ case OITAB:
+ var n1 Node
+ Igen(nl, &n1, res)
+
+ n1.Type = n.Type
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+
+ case OSPTR:
+ // pointer is the first word of string or slice.
+ if Isconst(nl, CTSTR) {
+ var n1 Node
+ Regalloc(&n1, Types[Tptr], res)
+ p1 := Thearch.Gins(Thearch.Optoas(OAS, n1.Type), nil, &n1)
+ Datastring(nl.Val.U.Sval, &p1.From)
+ p1.From.Type = obj.TYPE_ADDR
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ break
+ }
+
+ var n1 Node
+ Igen(nl, &n1, res)
+ n1.Type = n.Type
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+
+ case OLEN:
+ if Istype(nl.Type, TMAP) || Istype(nl.Type, TCHAN) {
+ // map and chan have len in the first int-sized word.
+ // a zero pointer means zero length
+ var n1 Node
+ Regalloc(&n1, Types[Tptr], res)
+
+ Cgen(nl, &n1)
+
+ var n2 Node
+ Nodconst(&n2, Types[Tptr], 0)
+ Thearch.Gins(Thearch.Optoas(OCMP, Types[Tptr]), &n1, &n2)
+ p1 := Gbranch(Thearch.Optoas(OEQ, Types[Tptr]), nil, 0)
+
+ n2 = n1
+ n2.Op = OINDREG
+ n2.Type = Types[Simtype[TINT]]
+ Thearch.Gmove(&n2, &n1)
+
+ Patch(p1, Pc)
+
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ break
+ }
+
+ if Istype(nl.Type, TSTRING) || Isslice(nl.Type) {
+ // both slice and string keep len one pointer-width into the
+ // struct, at offset Array_nel.
+ // a zero pointer means zero length
+ var n1 Node
+ Igen(nl, &n1, res)
+
+ n1.Type = Types[Simtype[TUINT]]
+ n1.Xoffset += int64(Array_nel)
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ break
+ }
+
+ Fatal("cgen: OLEN: unknown type %v", Tconv(nl.Type, obj.FmtLong))
+
+ case OCAP:
+ if Istype(nl.Type, TCHAN) {
+ // chan has cap in the second int-sized word.
+ // a zero pointer means zero length
+ var n1 Node
+ Regalloc(&n1, Types[Tptr], res)
+
+ Cgen(nl, &n1)
+
+ var n2 Node
+ Nodconst(&n2, Types[Tptr], 0)
+ Thearch.Gins(Thearch.Optoas(OCMP, Types[Tptr]), &n1, &n2)
+ p1 := Gbranch(Thearch.Optoas(OEQ, Types[Tptr]), nil, 0)
+
+ n2 = n1
+ n2.Op = OINDREG
+ n2.Xoffset = int64(Widthint)
+ n2.Type = Types[Simtype[TINT]]
+ Thearch.Gmove(&n2, &n1)
+
+ Patch(p1, Pc)
+
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ break
+ }
+
+ if Isslice(nl.Type) {
+ var n1 Node
+ Igen(nl, &n1, res)
+ n1.Type = Types[Simtype[TUINT]]
+ n1.Xoffset += int64(Array_cap)
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ break
+ }
+
+ Fatal("cgen: OCAP: unknown type %v", Tconv(nl.Type, obj.FmtLong))
+
+ case OADDR:
+ if n.Bounded { // let race detector avoid nil checks
+ Disable_checknil++
+ }
+ Agen(nl, res)
+ if n.Bounded {
+ Disable_checknil--
+ }
+
+ case OCALLMETH:
+ cgen_callmeth(n, 0)
+ cgen_callret(n, res)
+
+ case OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_callret(n, res)
+
+ case OCALLFUNC:
+ cgen_call(n, 0)
+ cgen_callret(n, res)
+
+ case OMOD, ODIV:
+ if Isfloat[n.Type.Etype] || Thearch.Dodiv == nil {
+ a = Thearch.Optoas(int(n.Op), nl.Type)
+ goto abop
+ }
+
+ if nl.Ullman >= nr.Ullman {
+ var n1 Node
+ Regalloc(&n1, nl.Type, res)
+ Cgen(nl, &n1)
+ cgen_div(int(n.Op), &n1, nr, res)
+ Regfree(&n1)
+ } else {
+ var n2 Node
+ if !Smallintconst(nr) {
+ Regalloc(&n2, nr.Type, res)
+ Cgen(nr, &n2)
+ } else {
+ n2 = *nr
+ }
+
+ cgen_div(int(n.Op), nl, &n2, res)
+ if n2.Op != OLITERAL {
+ Regfree(&n2)
+ }
+ }
+
+ case OLSH,
+ ORSH,
+ OLROT:
+ Thearch.Cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
+ }
+
+ return
+
+ /*
+ * put simplest on right - we'll generate into left
+ * and then adjust it using the computation of right.
+ * constants and variables have the same ullman
+ * count, so look for constants specially.
+ *
+ * an integer constant we can use as an immediate
+ * is simpler than a variable - we can use the immediate
+ * in the adjustment instruction directly - so it goes
+ * on the right.
+ *
+ * other constants, like big integers or floating point
+ * constants, require a mov into a register, so those
+ * might as well go on the left, so we can reuse that
+ * register for the computation.
+ */
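+ // For example, in x+3 the Smallintconst 3 stays in nr and becomes
+ // an immediate in the ADD itself, while a constant too large for
+ // an immediate is swapped into nl so the register that loads it
+ // doubles as the result register.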
+sbop: // symmetric binary
+ if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (Smallintconst(nl) || (nr.Op == OLITERAL && !Smallintconst(nr)))) {
+ r := nl
+ nl = nr
+ nr = r
+ }
+
+abop: // asymmetric binary
+ var n1 Node
+ var n2 Node
+ if Ctxt.Arch.Thechar == '8' {
+ // no registers, sigh
+ if Smallintconst(nr) {
+ var n1 Node
+ Mgen(nl, &n1, res)
+ var n2 Node
+ Regalloc(&n2, nl.Type, &n1)
+ Thearch.Gmove(&n1, &n2)
+ Thearch.Gins(a, nr, &n2)
+ Thearch.Gmove(&n2, res)
+ Regfree(&n2)
+ Mfree(&n1)
+ } else if nl.Ullman >= nr.Ullman {
+ var nt Node
+ Tempname(&nt, nl.Type)
+ Cgen(nl, &nt)
+ var n2 Node
+ Mgen(nr, &n2, nil)
+ var n1 Node
+ Regalloc(&n1, nl.Type, res)
+ Thearch.Gmove(&nt, &n1)
+ Thearch.Gins(a, &n2, &n1)
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ Mfree(&n2)
+ } else {
+ var n2 Node
+ Regalloc(&n2, nr.Type, res)
+ Cgen(nr, &n2)
+ var n1 Node
+ Regalloc(&n1, nl.Type, nil)
+ Cgen(nl, &n1)
+ Thearch.Gins(a, &n2, &n1)
+ Regfree(&n2)
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ }
+ return
+ }
+
+ if nl.Ullman >= nr.Ullman {
+ Regalloc(&n1, nl.Type, res)
+ Cgen(nl, &n1)
+
+ if Smallintconst(nr) && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
+ n2 = *nr
+ } else {
+ Regalloc(&n2, nr.Type, nil)
+ Cgen(nr, &n2)
+ }
+ } else {
+ if Smallintconst(nr) && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
+ n2 = *nr
+ } else {
+ Regalloc(&n2, nr.Type, res)
+ Cgen(nr, &n2)
+ }
+
+ Regalloc(&n1, nl.Type, nil)
+ Cgen(nl, &n1)
+ }
+
+ Thearch.Gins(a, &n2, &n1)
+ if n2.Op != OLITERAL {
+ Regfree(&n2)
+ }
+ cgen_norm(n, &n1, res)
+}
+
+// cgen_norm moves n1 to res, truncating to expected type if necessary.
+// n1 is a register, and cgen_norm frees it.
+func cgen_norm(n, n1, res *Node) {
+ switch Ctxt.Arch.Thechar {
+ case '6', '8':
+ // We use sized math, so the result is already truncated.
+ default:
+ switch n.Op {
+ case OADD, OSUB, OMUL, ODIV, OCOM, OMINUS:
+ // TODO(rsc): What about left shift?
+ Thearch.Gins(Thearch.Optoas(OAS, n.Type), n1, n1)
+ }
+ }
+
+ Thearch.Gmove(n1, res)
+ Regfree(n1)
+}
+
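+// Mgen is a relaxed Cgen: it leaves the value wherever is cheapest,
+// in place when n is already addressable, otherwise in a stack
+// temporary, promoted to a register only for values of at most
+// pointer width (or floats). Every Mgen must be paired with an Mfree.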
+func Mgen(n *Node, n1 *Node, rg *Node) {
+ n1.Op = OEMPTY
+
+ if n.Addable != 0 {
+ *n1 = *n
+ if n1.Op == OREGISTER || n1.Op == OINDREG {
+ reg[n.Val.U.Reg-int16(Thearch.REGMIN)]++
+ }
+ return
+ }
+
+ Tempname(n1, n.Type)
+ Cgen(n, n1)
+ if n.Type.Width <= int64(Widthptr) || Isfloat[n.Type.Etype] {
+ n2 := *n1
+ Regalloc(n1, n.Type, rg)
+ Thearch.Gmove(&n2, n1)
+ }
+}
+
+func Mfree(n *Node) {
+ if n.Op == OREGISTER {
+ Regfree(n)
+ }
+}
+
+/*
+ * allocate a register (reusing res if possible) and generate
+ * a = n
+ * The caller must call Regfree(a).
+ */
+func Cgenr(n *Node, a *Node, res *Node) {
+ if Debug['g'] != 0 {
+ Dump("cgenr-n", n)
+ }
+
+ if Isfat(n.Type) {
+ Fatal("cgenr on fat node")
+ }
+
+ if n.Addable != 0 {
+ Regalloc(a, n.Type, res)
+ Thearch.Gmove(n, a)
+ return
+ }
+
+ switch n.Op {
+ case ONAME,
+ ODOT,
+ ODOTPTR,
+ OINDEX,
+ OCALLFUNC,
+ OCALLMETH,
+ OCALLINTER:
+ var n1 Node
+ Igen(n, &n1, res)
+ Regalloc(a, Types[Tptr], &n1)
+ Thearch.Gmove(&n1, a)
+ Regfree(&n1)
+
+ default:
+ Regalloc(a, n.Type, res)
+ Cgen(n, a)
+ }
+}
+
+/*
+ * allocate a register (reusing res if possible) and generate
+ * a = &n
+ * The caller must call Regfree(a).
+ * The generated code checks that the result is not nil.
+ */
+func Agenr(n *Node, a *Node, res *Node) {
+ if Debug['g'] != 0 {
+ Dump("\nagenr-n", n)
+ }
+
+ nl := n.Left
+ nr := n.Right
+
+ switch n.Op {
+ case ODOT, ODOTPTR, OCALLFUNC, OCALLMETH, OCALLINTER:
+ var n1 Node
+ Igen(n, &n1, res)
+ Regalloc(a, Types[Tptr], &n1)
+ Agen(&n1, a)
+ Regfree(&n1)
+
+ case OIND:
+ Cgenr(n.Left, a, res)
+ Cgen_checknil(a)
+
+ case OINDEX:
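+ // Three copies of the index computation follow: a 32-bit ARM
+ // path, a 386 path, and the generic 64-bit path; each leaves
+ // &nl[nr] in a register in *a.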
+ if Ctxt.Arch.Thechar == '5' {
+ var p2 *obj.Prog // to be patched to panicindex.
+ w := uint32(n.Type.Width)
+ bounded := Debug['B'] != 0 || n.Bounded
+ var n1 Node
+ var n3 Node
+ if nr.Addable != 0 {
+ var tmp Node
+ if !Isconst(nr, CTINT) {
+ Tempname(&tmp, Types[TINT32])
+ }
+ if !Isconst(nl, CTSTR) {
+ Agenr(nl, &n3, res)
+ }
+ if !Isconst(nr, CTINT) {
+ p2 = Thearch.Cgenindex(nr, &tmp, bounded)
+ Regalloc(&n1, tmp.Type, nil)
+ Thearch.Gmove(&tmp, &n1)
+ }
+ } else if nl.Addable != 0 {
+ if !Isconst(nr, CTINT) {
+ var tmp Node
+ Tempname(&tmp, Types[TINT32])
+ p2 = Thearch.Cgenindex(nr, &tmp, bounded)
+ Regalloc(&n1, tmp.Type, nil)
+ Thearch.Gmove(&tmp, &n1)
+ }
+
+ if !Isconst(nl, CTSTR) {
+ Agenr(nl, &n3, res)
+ }
+ } else {
+ var tmp Node
+ Tempname(&tmp, Types[TINT32])
+ p2 = Thearch.Cgenindex(nr, &tmp, bounded)
+ nr = &tmp
+ if !Isconst(nl, CTSTR) {
+ Agenr(nl, &n3, res)
+ }
+ Regalloc(&n1, tmp.Type, nil)
+ Thearch.Gins(Thearch.Optoas(OAS, tmp.Type), &tmp, &n1)
+ }
+
+ // &a is in &n3 (allocated in res)
+ // i is in &n1 (if not constant)
+ // w is width
+
+ // constant index
+ if Isconst(nr, CTINT) {
+ if Isconst(nl, CTSTR) {
+ Fatal("constant string constant index")
+ }
+ v := uint64(Mpgetfix(nr.Val.U.Xval))
+ var n2 Node
+ if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+ if Debug['B'] == 0 && !n.Bounded {
+ n1 = n3
+ n1.Op = OINDREG
+ n1.Type = Types[Tptr]
+ n1.Xoffset = int64(Array_nel)
+ var n4 Node
+ Regalloc(&n4, n1.Type, nil)
+ Thearch.Gmove(&n1, &n4)
+ Nodconst(&n2, Types[TUINT32], int64(v))
+ Thearch.Gins(Thearch.Optoas(OCMP, Types[TUINT32]), &n4, &n2)
+ Regfree(&n4)
+ p1 := Gbranch(Thearch.Optoas(OGT, Types[TUINT32]), nil, +1)
+ Ginscall(Panicindex, 0)
+ Patch(p1, Pc)
+ }
+
+ n1 = n3
+ n1.Op = OINDREG
+ n1.Type = Types[Tptr]
+ n1.Xoffset = int64(Array_array)
+ Thearch.Gmove(&n1, &n3)
+ }
+
+ Nodconst(&n2, Types[Tptr], int64(v*uint64(w)))
+ Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+ *a = n3
+ break
+ }
+
+ var n2 Node
+ Regalloc(&n2, Types[TINT32], &n1) // i
+ Thearch.Gmove(&n1, &n2)
+ Regfree(&n1)
+
+ var n4 Node
+ if Debug['B'] == 0 && !n.Bounded {
+ // check bounds
+ if Isconst(nl, CTSTR) {
+ Nodconst(&n4, Types[TUINT32], int64(len(nl.Val.U.Sval)))
+ } else if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+ n1 = n3
+ n1.Op = OINDREG
+ n1.Type = Types[Tptr]
+ n1.Xoffset = int64(Array_nel)
+ Regalloc(&n4, Types[TUINT32], nil)
+ Thearch.Gmove(&n1, &n4)
+ } else {
+ Nodconst(&n4, Types[TUINT32], nl.Type.Bound)
+ }
+
+ Thearch.Gins(Thearch.Optoas(OCMP, Types[TUINT32]), &n2, &n4)
+ if n4.Op == OREGISTER {
+ Regfree(&n4)
+ }
+ p1 := Gbranch(Thearch.Optoas(OLT, Types[TUINT32]), nil, +1)
+ if p2 != nil {
+ Patch(p2, Pc)
+ }
+ Ginscall(Panicindex, 0)
+ Patch(p1, Pc)
+ }
+
+ if Isconst(nl, CTSTR) {
+ Regalloc(&n3, Types[Tptr], res)
+ p1 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), nil, &n3)
+ Datastring(nl.Val.U.Sval, &p1.From)
+ p1.From.Type = obj.TYPE_ADDR
+ } else if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+ n1 = n3
+ n1.Op = OINDREG
+ n1.Type = Types[Tptr]
+ n1.Xoffset = int64(Array_array)
+ Thearch.Gmove(&n1, &n3)
+ }
+
+ if w == 0 {
+ // nothing to do
+ } else if Thearch.AddIndex != nil && Thearch.AddIndex(&n2, int64(w), &n3) {
+ // done by back end
+ } else if w == 1 {
+ Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+ } else {
+ Regalloc(&n4, Types[TUINT32], nil)
+ Nodconst(&n1, Types[TUINT32], int64(w))
+ Thearch.Gmove(&n1, &n4)
+ Thearch.Gins(Thearch.Optoas(OMUL, Types[TUINT32]), &n4, &n2)
+ Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+ Regfree(&n4)
+ }
+ *a = n3
+ Regfree(&n2)
+ break
+ }
+ if Ctxt.Arch.Thechar == '8' {
+ var p2 *obj.Prog // to be patched to panicindex.
+ w := uint32(n.Type.Width)
+ bounded := Debug['B'] != 0 || n.Bounded
+ var n3 Node
+ var tmp Node
+ var n1 Node
+ if nr.Addable != 0 {
+ // Generate &nl first, and move nr into register.
+ if !Isconst(nl, CTSTR) {
+ Igen(nl, &n3, res)
+ }
+ if !Isconst(nr, CTINT) {
+ p2 = Thearch.Igenindex(nr, &tmp, bounded)
+ Regalloc(&n1, tmp.Type, nil)
+ Thearch.Gmove(&tmp, &n1)
+ }
+ } else if nl.Addable != 0 {
+ // Generate nr first, and move &nl into register.
+ if !Isconst(nr, CTINT) {
+ p2 = Thearch.Igenindex(nr, &tmp, bounded)
+ Regalloc(&n1, tmp.Type, nil)
+ Thearch.Gmove(&tmp, &n1)
+ }
+
+ if !Isconst(nl, CTSTR) {
+ Igen(nl, &n3, res)
+ }
+ } else {
+ p2 = Thearch.Igenindex(nr, &tmp, bounded)
+ nr = &tmp
+ if !Isconst(nl, CTSTR) {
+ Igen(nl, &n3, res)
+ }
+ Regalloc(&n1, tmp.Type, nil)
+ Thearch.Gins(Thearch.Optoas(OAS, tmp.Type), &tmp, &n1)
+ }
+
+ // For fixed array we really want the pointer in n3.
+ var n2 Node
+ if Isfixedarray(nl.Type) {
+ Regalloc(&n2, Types[Tptr], &n3)
+ Agen(&n3, &n2)
+ Regfree(&n3)
+ n3 = n2
+ }
+
+ // &a[0] is in n3 (allocated in res)
+ // i is in n1 (if not constant)
+ // len(a) is in nlen (if needed)
+ // w is width
+
+ // constant index
+ if Isconst(nr, CTINT) {
+ if Isconst(nl, CTSTR) {
+ Fatal("constant string constant index") // front end should handle
+ }
+ v := uint64(Mpgetfix(nr.Val.U.Xval))
+ if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+ if Debug['B'] == 0 && !n.Bounded {
+ nlen := n3
+ nlen.Type = Types[TUINT32]
+ nlen.Xoffset += int64(Array_nel)
+ Nodconst(&n2, Types[TUINT32], int64(v))
+ Thearch.Gins(Thearch.Optoas(OCMP, Types[TUINT32]), &nlen, &n2)
+ p1 := Gbranch(Thearch.Optoas(OGT, Types[TUINT32]), nil, +1)
+ Ginscall(Panicindex, -1)
+ Patch(p1, Pc)
+ }
+ }
+
+ // Load base pointer in n2 = n3.
+ Regalloc(&n2, Types[Tptr], &n3)
+
+ n3.Type = Types[Tptr]
+ n3.Xoffset += int64(Array_array)
+ Thearch.Gmove(&n3, &n2)
+ Regfree(&n3)
+ if v*uint64(w) != 0 {
+ Nodconst(&n1, Types[Tptr], int64(v*uint64(w)))
+ Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n1, &n2)
+ }
+ *a = n2
+ break
+ }
+
+ // i is in register n1, extend to 32 bits.
+ t := Types[TUINT32]
+
+ if Issigned[n1.Type.Etype] {
+ t = Types[TINT32]
+ }
+
+ Regalloc(&n2, t, &n1) // i
+ Thearch.Gmove(&n1, &n2)
+ Regfree(&n1)
+
+ if Debug['B'] == 0 && !n.Bounded {
+ // check bounds
+ t := Types[TUINT32]
+
+ var nlen Node
+ if Isconst(nl, CTSTR) {
+ Nodconst(&nlen, t, int64(len(nl.Val.U.Sval)))
+ } else if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+ nlen = n3
+ nlen.Type = t
+ nlen.Xoffset += int64(Array_nel)
+ } else {
+ Nodconst(&nlen, t, nl.Type.Bound)
+ }
+
+ Thearch.Gins(Thearch.Optoas(OCMP, t), &n2, &nlen)
+ p1 := Gbranch(Thearch.Optoas(OLT, t), nil, +1)
+ if p2 != nil {
+ Patch(p2, Pc)
+ }
+ Ginscall(Panicindex, -1)
+ Patch(p1, Pc)
+ }
+
+ if Isconst(nl, CTSTR) {
+ Regalloc(&n3, Types[Tptr], res)
+ p1 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), nil, &n3)
+ Datastring(nl.Val.U.Sval, &p1.From)
+ p1.From.Type = obj.TYPE_ADDR
+ Thearch.Gins(Thearch.Optoas(OADD, n3.Type), &n2, &n3)
+ goto indexdone1
+ }
+
+ // Load base pointer in n3.
+ Regalloc(&tmp, Types[Tptr], &n3)
+
+ if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+ n3.Type = Types[Tptr]
+ n3.Xoffset += int64(Array_array)
+ Thearch.Gmove(&n3, &tmp)
+ }
+
+ Regfree(&n3)
+ n3 = tmp
+
+ if w == 0 {
+ // nothing to do
+ } else if Thearch.AddIndex != nil && Thearch.AddIndex(&n2, int64(w), &n3) {
+ // done by back end
+ } else if w == 1 {
+ Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+ } else {
+ Nodconst(&tmp, Types[TUINT32], int64(w))
+ Thearch.Gins(Thearch.Optoas(OMUL, Types[TUINT32]), &tmp, &n2)
+ Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+ }
+
+ indexdone1:
+ *a = n3
+ Regfree(&n2)
+ break
+ }
+
+ freelen := 0
+ w := uint64(n.Type.Width)
+
+ // Generate the non-addressable child first.
+ var n3 Node
+ var nlen Node
+ var tmp Node
+ var n1 Node
+ if nr.Addable != 0 {
+ goto irad
+ }
+ if nl.Addable != 0 {
+ Cgenr(nr, &n1, nil)
+ if !Isconst(nl, CTSTR) {
+ if Isfixedarray(nl.Type) {
+ Agenr(nl, &n3, res)
+ } else {
+ Igen(nl, &nlen, res)
+ freelen = 1
+ nlen.Type = Types[Tptr]
+ nlen.Xoffset += int64(Array_array)
+ Regalloc(&n3, Types[Tptr], res)
+ Thearch.Gmove(&nlen, &n3)
+ nlen.Type = Types[Simtype[TUINT]]
+ nlen.Xoffset += int64(Array_nel) - int64(Array_array)
+ }
+ }
+
+ goto index
+ }
+
+ Tempname(&tmp, nr.Type)
+ Cgen(nr, &tmp)
+ nr = &tmp
+
+ irad:
+ if !Isconst(nl, CTSTR) {
+ if Isfixedarray(nl.Type) {
+ Agenr(nl, &n3, res)
+ } else {
+ if nl.Addable == 0 {
+ // igen will need an addressable node.
+ var tmp2 Node
+ Tempname(&tmp2, nl.Type)
+
+ Cgen(nl, &tmp2)
+ nl = &tmp2
+ }
+
+ Igen(nl, &nlen, res)
+ freelen = 1
+ nlen.Type = Types[Tptr]
+ nlen.Xoffset += int64(Array_array)
+ Regalloc(&n3, Types[Tptr], res)
+ Thearch.Gmove(&nlen, &n3)
+ nlen.Type = Types[Simtype[TUINT]]
+ nlen.Xoffset += int64(Array_nel) - int64(Array_array)
+ }
+ }
+
+ if !Isconst(nr, CTINT) {
+ Cgenr(nr, &n1, nil)
+ }
+
+ goto index
+
+ // &a is in &n3 (allocated in res)
+ // i is in &n1 (if not constant)
+ // len(a) is in nlen (if needed)
+ // w is width
+
+ // constant index
+ index:
+ if Isconst(nr, CTINT) {
+ if Isconst(nl, CTSTR) {
+ Fatal("constant string constant index") // front end should handle
+ }
+ v := uint64(Mpgetfix(nr.Val.U.Xval))
+ if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+ if Debug['B'] == 0 && !n.Bounded {
+ if nlen.Op != OREGISTER && (Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9') {
+ var tmp2 Node
+ Regalloc(&tmp2, Types[Simtype[TUINT]], nil)
+ Thearch.Gmove(&nlen, &tmp2)
+ Regfree(&nlen) // in case it is OINDREG
+ nlen = tmp2
+ }
+ var n2 Node
+ Nodconst(&n2, Types[Simtype[TUINT]], int64(v))
+ if Smallintconst(nr) {
+ Thearch.Gins(Thearch.Optoas(OCMP, Types[Simtype[TUINT]]), &nlen, &n2)
+ } else {
+ Regalloc(&tmp, Types[Simtype[TUINT]], nil)
+ Thearch.Gmove(&n2, &tmp)
+ Thearch.Gins(Thearch.Optoas(OCMP, Types[Simtype[TUINT]]), &nlen, &tmp)
+ Regfree(&tmp)
+ }
+
+ p1 := Gbranch(Thearch.Optoas(OGT, Types[Simtype[TUINT]]), nil, +1)
+ Ginscall(Panicindex, -1)
+ Patch(p1, Pc)
+ }
+
+ Regfree(&nlen)
+ }
+
+ if v*w != 0 {
+ Thearch.Ginscon(Thearch.Optoas(OADD, Types[Tptr]), int64(v*w), &n3)
+ }
+ *a = n3
+ break
+ }
+
+ // type of the index
+ t := Types[TUINT64]
+
+ if Issigned[n1.Type.Etype] {
+ t = Types[TINT64]
+ }
+
+ var n2 Node
+ Regalloc(&n2, t, &n1) // i
+ Thearch.Gmove(&n1, &n2)
+ Regfree(&n1)
+
+ if Debug['B'] == 0 && !n.Bounded {
+ // check bounds
+ t = Types[Simtype[TUINT]]
+
+ if Is64(nr.Type) {
+ t = Types[TUINT64]
+ }
+ if Isconst(nl, CTSTR) {
+ Nodconst(&nlen, t, int64(len(nl.Val.U.Sval)))
+ } else if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+ if Is64(nr.Type) || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+ var n5 Node
+ Regalloc(&n5, t, nil)
+ Thearch.Gmove(&nlen, &n5)
+ Regfree(&nlen)
+ nlen = n5
+ }
+ } else {
+ Nodconst(&nlen, t, nl.Type.Bound)
+ if !Smallintconst(&nlen) {
+ var n5 Node
+ Regalloc(&n5, t, nil)
+ Thearch.Gmove(&nlen, &n5)
+ nlen = n5
+ freelen = 1
+ }
+ }
+
+ Thearch.Gins(Thearch.Optoas(OCMP, t), &n2, &nlen)
+ p1 := Gbranch(Thearch.Optoas(OLT, t), nil, +1)
+ Ginscall(Panicindex, -1)
+ Patch(p1, Pc)
+ }
+
+ if Isconst(nl, CTSTR) {
+ Regalloc(&n3, Types[Tptr], res)
+ p1 := Thearch.Gins(Thearch.Optoas(OAS, n3.Type), nil, &n3) // XXX was LEAQ!
+ Datastring(nl.Val.U.Sval, &p1.From)
+ p1.From.Type = obj.TYPE_ADDR
+ Thearch.Gins(Thearch.Optoas(OADD, n3.Type), &n2, &n3)
+ goto indexdone
+ }
+
+ if w == 0 {
+ // nothing to do
+ } else if Thearch.AddIndex != nil && Thearch.AddIndex(&n2, int64(w), &n3) {
+ // done by back end
+ } else if w == 1 {
+ Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+ } else {
+ Thearch.Ginscon(Thearch.Optoas(OMUL, t), int64(w), &n2)
+ Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+ }
+
+ indexdone:
+ *a = n3
+ Regfree(&n2)
+ if freelen != 0 {
+ Regfree(&nlen)
+ }
+
+ default:
+ Regalloc(a, Types[Tptr], res)
+ Agen(n, a)
+ }
+}
+
+/*
+ * generate:
+ * res = &n;
+ * The generated code checks that the result is not nil.
+ */
+func Agen(n *Node, res *Node) {
+ if Debug['g'] != 0 {
+ Dump("\nagen-res", res)
+ Dump("agen-r", n)
+ }
+
+ if n == nil || n.Type == nil {
+ return
+ }
+
+ for n.Op == OCONVNOP {
+ n = n.Left
+ }
+
+ if Isconst(n, CTNIL) && n.Type.Width > int64(Widthptr) {
+ // Use of a nil interface or nil slice.
+ // Create a temporary we can take the address of and read.
+ // The generated code is just going to panic, so it need not
+ // be terribly efficient. See issue 3670.
+ var n1 Node
+ Tempname(&n1, n.Type)
+
+ Gvardef(&n1)
+ Thearch.Clearfat(&n1)
+ var n2 Node
+ Regalloc(&n2, Types[Tptr], res)
+ var n3 Node
+ n3.Op = OADDR
+ n3.Left = &n1
+ Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &n3, &n2)
+ Thearch.Gmove(&n2, res)
+ Regfree(&n2)
+ return
+ }
+
+ if n.Addable != 0 {
+ if n.Op == OREGISTER {
+ Fatal("agen OREGISTER")
+ }
+ var n1 Node
+ n1.Op = OADDR
+ n1.Left = n
+ var n2 Node
+ Regalloc(&n2, Types[Tptr], res)
+ Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &n1, &n2)
+ Thearch.Gmove(&n2, res)
+ Regfree(&n2)
+ return
+ }
+
+ nl := n.Left
+
+ switch n.Op {
+ default:
+ Fatal("agen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ case OCALLMETH:
+ cgen_callmeth(n, 0)
+ cgen_aret(n, res)
+
+ case OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_aret(n, res)
+
+ case OCALLFUNC:
+ cgen_call(n, 0)
+ cgen_aret(n, res)
+
+ case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+ var n1 Node
+ Tempname(&n1, n.Type)
+ Cgen_slice(n, &n1)
+ Agen(&n1, res)
+
+ case OEFACE:
+ var n1 Node
+ Tempname(&n1, n.Type)
+ Cgen_eface(n, &n1)
+ Agen(&n1, res)
+
+ case OINDEX:
+ var n1 Node
+ Agenr(n, &n1, res)
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+
+ case ONAME:
+ // should only get here with names in this func.
+ if n.Funcdepth > 0 && n.Funcdepth != Funcdepth {
+ Dump("bad agen", n)
+ Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, Funcdepth)
+ }
+
+ // should only get here for heap vars or paramref
+ if n.Class&PHEAP == 0 && n.Class != PPARAMREF {
+ Dump("bad agen", n)
+ Fatal("agen: bad ONAME class %#x", n.Class)
+ }
+
+ Cgen(n.Heapaddr, res)
+ if n.Xoffset != 0 {
+ addOffset(res, n.Xoffset)
+ }
+
+ case OIND:
+ Cgen(nl, res)
+ Cgen_checknil(res)
+
+ case ODOT:
+ Agen(nl, res)
+ if n.Xoffset != 0 {
+ addOffset(res, n.Xoffset)
+ }
+
+ case ODOTPTR:
+ Cgen(nl, res)
+ Cgen_checknil(res)
+ if n.Xoffset != 0 {
+ addOffset(res, n.Xoffset)
+ }
+ }
+}
+
+func addOffset(res *Node, offset int64) {
+ if Ctxt.Arch.Thechar == '6' || Ctxt.Arch.Thechar == '8' {
+ Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), Nodintconst(offset), res)
+ return
+ }
+
+ var n1, n2 Node
+ Regalloc(&n1, Types[Tptr], nil)
+ Thearch.Gmove(res, &n1)
+ Regalloc(&n2, Types[Tptr], nil)
+ Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), Nodintconst(offset), &n2)
+ Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n1)
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ Regfree(&n2)
+}
+
+/*
+ * generate:
+ * newreg = &n;
+ * res = newreg
+ *
+ * on exit, a has been changed to be *newreg.
+ * caller must Regfree(a).
+ * The generated code checks that the result is not *nil.
+ */
+func Igen(n *Node, a *Node, res *Node) {
+ if Debug['g'] != 0 {
+ Dump("\nigen-n", n)
+ }
+
+ switch n.Op {
+ case ONAME:
+ if (n.Class&PHEAP != 0) || n.Class == PPARAMREF {
+ break
+ }
+ *a = *n
+ return
+
+ case OINDREG:
+ // Increase the refcount of the register so that igen's caller
+ // has to call Regfree.
+ if n.Val.U.Reg != int16(Thearch.REGSP) {
+ reg[n.Val.U.Reg-int16(Thearch.REGMIN)]++
+ }
+ *a = *n
+ return
+
+ case ODOT:
+ Igen(n.Left, a, res)
+ a.Xoffset += n.Xoffset
+ a.Type = n.Type
+ Fixlargeoffset(a)
+ return
+
+ case ODOTPTR:
+ Cgenr(n.Left, a, res)
+ Cgen_checknil(a)
+ a.Op = OINDREG
+ a.Xoffset += n.Xoffset
+ a.Type = n.Type
+ Fixlargeoffset(a)
+ return
+
+ case OCALLFUNC,
+ OCALLMETH,
+ OCALLINTER:
+ switch n.Op {
+ case OCALLFUNC:
+ cgen_call(n, 0)
+
+ case OCALLMETH:
+ cgen_callmeth(n, 0)
+
+ case OCALLINTER:
+ cgen_callinter(n, nil, 0)
+ }
+
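+ // Call results sit at fixed offsets from SP in the argument area,
+ // so the value can be addressed directly as offset(SP) instead of
+ // being copied anywhere.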
+ var flist Iter
+ fp := Structfirst(&flist, Getoutarg(n.Left.Type))
+ *a = Node{}
+ a.Op = OINDREG
+ a.Val.U.Reg = int16(Thearch.REGSP)
+ a.Addable = 1
+ a.Xoffset = fp.Width
+ if HasLinkRegister() {
+ a.Xoffset += int64(Ctxt.Arch.Ptrsize)
+ }
+ a.Type = n.Type
+ return
+
+ // Index of fixed-size array by constant can
+ // put the offset in the addressing.
+ // Could do the same for slice except that we need
+ // to use the real index for the bounds checking.
+ case OINDEX:
+ if Isfixedarray(n.Left.Type) || (Isptr[n.Left.Type.Etype] && Isfixedarray(n.Left.Left.Type)) {
+ if Isconst(n.Right, CTINT) {
+ // Compute &a.
+ if !Isptr[n.Left.Type.Etype] {
+ Igen(n.Left, a, res)
+ } else {
+ var n1 Node
+ Igen(n.Left, &n1, res)
+ Cgen_checknil(&n1)
+ Regalloc(a, Types[Tptr], res)
+ Thearch.Gmove(&n1, a)
+ Regfree(&n1)
+ a.Op = OINDREG
+ }
+
+ // Compute &a[i] as &a + i*width.
+ a.Type = n.Type
+
+ a.Xoffset += Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
+ Fixlargeoffset(a)
+ return
+ }
+ }
+ }
+
+ Agenr(n, a, res)
+ a.Op = OINDREG
+ a.Type = n.Type
+}
+
+/*
+ * generate:
+ * if(n == true) goto to;
+ */
+func Bgen(n *Node, true_ bool, likely int, to *obj.Prog) {
+ if Debug['g'] != 0 {
+ Dump("\nbgen", n)
+ }
+
+ if n == nil {
+ n = Nodbool(true)
+ }
+
+ if n.Ninit != nil {
+ Genlist(n.Ninit)
+ }
+
+ if n.Type == nil {
+ Convlit(&n, Types[TBOOL])
+ if n.Type == nil {
+ return
+ }
+ }
+
+ et := int(n.Type.Etype)
+ if et != TBOOL {
+ Yyerror("cgen: bad type %v for %v", Tconv(n.Type, 0), Oconv(int(n.Op), 0))
+ Patch(Thearch.Gins(obj.AEND, nil, nil), to)
+ return
+ }
+
+ for n.Op == OCONVNOP {
+ n = n.Left
+ if n.Ninit != nil {
+ Genlist(n.Ninit)
+ }
+ }
+
+ if Thearch.Bgen_float != nil && n.Left != nil && Isfloat[n.Left.Type.Etype] {
+ Thearch.Bgen_float(n, bool2int(true_), likely, to)
+ return
+ }
+
+ var nl *Node
+ var nr *Node
+ switch n.Op {
+ default:
+ goto def
+
+ // need to ask if it is bool?
+ case OLITERAL:
+ if !true_ == (n.Val.U.Bval == 0) {
+ Patch(Gbranch(obj.AJMP, nil, likely), to)
+ }
+ return
+
+ case ONAME:
+ if n.Addable == 0 || Ctxt.Arch.Thechar == '5' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+ goto def
+ }
+ var n1 Node
+ Nodconst(&n1, n.Type, 0)
+ Thearch.Gins(Thearch.Optoas(OCMP, n.Type), n, &n1)
+ a := Thearch.Optoas(ONE, n.Type)
+ if !true_ {
+ a = Thearch.Optoas(OEQ, n.Type)
+ }
+ Patch(Gbranch(a, n.Type, likely), to)
+ return
+
+ case OANDAND, OOROR:
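+ // Short-circuit evaluation: when the branch sense matches the
+ // operator (&& jumping on true, || jumping on false), both
+ // operands must hold, so each is tested with inverted sense
+ // against a local failure label; otherwise a single operand can
+ // decide, and each tests directly against the target.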
+ if (n.Op == OANDAND) == true_ {
+ p1 := Gbranch(obj.AJMP, nil, 0)
+ p2 := Gbranch(obj.AJMP, nil, 0)
+ Patch(p1, Pc)
+ Bgen(n.Left, !true_, -likely, p2)
+ Bgen(n.Right, !true_, -likely, p2)
+ p1 = Gbranch(obj.AJMP, nil, 0)
+ Patch(p1, to)
+ Patch(p2, Pc)
+ } else {
+ Bgen(n.Left, true_, likely, to)
+ Bgen(n.Right, true_, likely, to)
+ }
+
+ return
+
+ case OEQ, ONE, OLT, OGT, OLE, OGE:
+ nr = n.Right
+ if nr == nil || nr.Type == nil {
+ return
+ }
+ fallthrough
+
+ case ONOT: // unary
+ nl = n.Left
+
+ if nl == nil || nl.Type == nil {
+ return
+ }
+ }
+
+ switch n.Op {
+ case ONOT:
+ Bgen(nl, !true_, likely, to)
+ return
+
+ case OEQ, ONE, OLT, OGT, OLE, OGE:
+ a := int(n.Op)
+ if !true_ {
+ if Isfloat[nr.Type.Etype] {
+ // brcom is not valid on floats when NaN is involved.
+ p1 := Gbranch(obj.AJMP, nil, 0)
+ p2 := Gbranch(obj.AJMP, nil, 0)
+ Patch(p1, Pc)
+ ll := n.Ninit // avoid re-genning ninit
+ n.Ninit = nil
+ Bgen(n, true, -likely, p2)
+ n.Ninit = ll
+ Patch(Gbranch(obj.AJMP, nil, 0), to)
+ Patch(p2, Pc)
+ return
+ }
+
+ a = Brcom(a)
+ true_ = !true_
+ }
+
+ // make simplest on right
+ if nl.Op == OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < UINF) {
+ a = Brrev(a)
+ r := nl
+ nl = nr
+ nr = r
+ }
+
+ if Isslice(nl.Type) {
+ // front end should only leave cmp to literal nil
+ if (a != OEQ && a != ONE) || nr.Op != OLITERAL {
+ Yyerror("illegal slice comparison")
+ break
+ }
+
+ a = Thearch.Optoas(a, Types[Tptr])
+ var n1 Node
+ Igen(nl, &n1, nil)
+ n1.Xoffset += int64(Array_array)
+ n1.Type = Types[Tptr]
+ var n2 Node
+ Regalloc(&n2, Types[Tptr], &n1)
+ Cgen(&n1, &n2)
+ Regfree(&n1)
+ var tmp Node
+ Nodconst(&tmp, Types[Tptr], 0)
+ Thearch.Gins(Thearch.Optoas(OCMP, Types[Tptr]), &n2, &tmp)
+ Patch(Gbranch(a, Types[Tptr], likely), to)
+ Regfree(&n2)
+ break
+ }
+
+ if Isinter(nl.Type) {
+ // front end should only leave cmp to literal nil
+ if (a != OEQ && a != ONE) || nr.Op != OLITERAL {
+ Yyerror("illegal interface comparison")
+ break
+ }
+
+ a = Thearch.Optoas(a, Types[Tptr])
+ var n1 Node
+ Igen(nl, &n1, nil)
+ n1.Type = Types[Tptr]
+ var n2 Node
+ Regalloc(&n2, Types[Tptr], &n1)
+ Cgen(&n1, &n2)
+ Regfree(&n1)
+ var tmp Node
+ Nodconst(&tmp, Types[Tptr], 0)
+ Thearch.Gins(Thearch.Optoas(OCMP, Types[Tptr]), &n2, &tmp)
+ Patch(Gbranch(a, Types[Tptr], likely), to)
+ Regfree(&n2)
+ break
+ }
+
+ if Iscomplex[nl.Type.Etype] {
+ Complexbool(a, nl, nr, true_, likely, to)
+ break
+ }
+
+ if Ctxt.Arch.Regsize == 4 && Is64(nr.Type) {
+ if nl.Addable == 0 || Isconst(nl, CTINT) {
+ var n1 Node
+ Tempname(&n1, nl.Type)
+ Cgen(nl, &n1)
+ nl = &n1
+ }
+
+ if nr.Addable == 0 {
+ var n2 Node
+ Tempname(&n2, nr.Type)
+ Cgen(nr, &n2)
+ nr = &n2
+ }
+
+ Thearch.Cmp64(nl, nr, a, likely, to)
+ break
+ }
+
+ var n1 Node
+ var n2 Node
+ if nr.Ullman >= UINF {
+ Regalloc(&n1, nl.Type, nil)
+ Cgen(nl, &n1)
+
+ var tmp Node
+ Tempname(&tmp, nl.Type)
+ Thearch.Gmove(&n1, &tmp)
+ Regfree(&n1)
+
+ Regalloc(&n2, nr.Type, nil)
+ Cgen(nr, &n2)
+
+ Regalloc(&n1, nl.Type, nil)
+ Cgen(&tmp, &n1)
+
+ goto cmp
+ }
+
+ if nl.Addable == 0 && Ctxt.Arch.Thechar == '8' {
+ Tempname(&n1, nl.Type)
+ } else {
+ Regalloc(&n1, nl.Type, nil)
+ }
+ Cgen(nl, &n1)
+ nl = &n1
+
+ if Smallintconst(nr) && Ctxt.Arch.Thechar != '9' {
+ Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), nl, nr)
+ Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+ if n1.Op == OREGISTER {
+ Regfree(&n1)
+ }
+ break
+ }
+
+ if nr.Addable == 0 && Ctxt.Arch.Thechar == '8' {
+ var tmp Node
+ Tempname(&tmp, nr.Type)
+ Cgen(nr, &tmp)
+ nr = &tmp
+ }
+
+ Regalloc(&n2, nr.Type, nil)
+ Cgen(nr, &n2)
+ nr = &n2
+
+ cmp:
+ l, r := nl, nr
+ // On x86, only < and <= work right with NaN; reverse if needed
+ if Ctxt.Arch.Thechar == '6' && Isfloat[nl.Type.Etype] && (a == OGT || a == OGE) {
+ l, r = r, l
+ a = Brrev(a)
+ }
+
+ Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), l, r)
+
+ if Ctxt.Arch.Thechar == '6' && Isfloat[nr.Type.Etype] && (n.Op == OEQ || n.Op == ONE) {
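+ // An unordered x86 compare (either operand NaN) sets the parity
+ // flag, so EQ must check "not NE and not P" while NE accepts
+ // either NE or P.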
+ if n.Op == OEQ {
+ // neither NE nor P
+ p1 := Gbranch(Thearch.Optoas(ONE, nr.Type), nil, -likely)
+ p2 := Gbranch(Thearch.Optoas(OPS, nr.Type), nil, -likely)
+ Patch(Gbranch(obj.AJMP, nil, 0), to)
+ Patch(p1, Pc)
+ Patch(p2, Pc)
+ } else {
+ // either NE or P
+ Patch(Gbranch(Thearch.Optoas(ONE, nr.Type), nil, likely), to)
+ Patch(Gbranch(Thearch.Optoas(OPS, nr.Type), nil, likely), to)
+ }
+ } else if Ctxt.Arch.Thechar == '5' && Isfloat[nl.Type.Etype] {
+ if n.Op == ONE {
+ Patch(Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, likely), to)
+ Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+ } else {
+ p := Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, -likely)
+ Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+ Patch(p, Pc)
+ }
+ } else if (Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9') && Isfloat[nl.Type.Etype] && (a == OLE || a == OGE) {
+ // On arm64 and ppc64, <= and >= mishandle NaN. Must decompose into < or > and =.
+ if a == OLE {
+ a = OLT
+ } else {
+ a = OGT
+ }
+ Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+ Patch(Gbranch(Thearch.Optoas(OEQ, nr.Type), nr.Type, likely), to)
+ } else {
+ Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+ }
+ if n1.Op == OREGISTER {
+ Regfree(&n1)
+ }
+ if n2.Op == OREGISTER {
+ Regfree(&n2)
+ }
+ }
+
+ return
+
+def:
+ // TODO: Optimize on systems that can compare to zero easily.
+ var n1 Node
+ Regalloc(&n1, n.Type, nil)
+ Cgen(n, &n1)
+ var n2 Node
+ Nodconst(&n2, n.Type, 0)
+ Thearch.Gins(Thearch.Optoas(OCMP, n.Type), &n1, &n2)
+ a := Thearch.Optoas(ONE, n.Type)
+ if !true_ {
+ a = Thearch.Optoas(OEQ, n.Type)
+ }
+ Patch(Gbranch(a, n.Type, likely), to)
+ Regfree(&n1)
+ return
+}
+
+/*
+ * n is on stack, either local variable
+ * or return value from function call.
+ * return n's offset from SP.
+ */
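+// The values -1000 and +1000 are sentinels: "not on the stack" and
+// "on the stack at a statically unknown offset" respectively; sgen
+// tests for them before choosing a copy strategy.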
+func stkof(n *Node) int64 {
+ switch n.Op {
+ case OINDREG:
+ return n.Xoffset
+
+ case ODOT:
+ t := n.Left.Type
+ if Isptr[t.Etype] {
+ break
+ }
+ off := stkof(n.Left)
+ if off == -1000 || off == 1000 {
+ return off
+ }
+ return off + n.Xoffset
+
+ case OINDEX:
+ t := n.Left.Type
+ if !Isfixedarray(t) {
+ break
+ }
+ off := stkof(n.Left)
+ if off == -1000 || off == 1000 {
+ return off
+ }
+ if Isconst(n.Right, CTINT) {
+ return off + t.Type.Width*Mpgetfix(n.Right.Val.U.Xval)
+ }
+ return 1000
+
+ case OCALLMETH, OCALLINTER, OCALLFUNC:
+ t := n.Left.Type
+ if Isptr[t.Etype] {
+ t = t.Type
+ }
+
+ var flist Iter
+ t = Structfirst(&flist, Getoutarg(t))
+ if t != nil {
+ w := t.Width
+ if HasLinkRegister() {
+ w += int64(Ctxt.Arch.Ptrsize)
+ }
+ return w
+ }
+ }
+
+ // botch - probably failing to recognize address
+ // arithmetic on the above, e.g. INDEX and DOT.
+ return -1000
+}
+
+/*
+ * block copy:
+ * memmove(&ns, &n, w);
+ */
+func sgen(n *Node, ns *Node, w int64) {
+ if Debug['g'] != 0 {
+ fmt.Printf("\nsgen w=%d\n", w)
+ Dump("r", n)
+ Dump("res", ns)
+ }
+
+ if n.Ullman >= UINF && ns.Ullman >= UINF {
+ Fatal("sgen UINF")
+ }
+
+ if w < 0 {
+ Fatal("sgen copy %d", w)
+ }
+
+ // If copying .args, that's all the results, so record definition sites
+ // for them for the liveness analysis.
+ if ns.Op == ONAME && ns.Sym.Name == ".args" {
+ for l := Curfn.Dcl; l != nil; l = l.Next {
+ if l.N.Class == PPARAMOUT {
+ Gvardef(l.N)
+ }
+ }
+ }
+
+ // Avoid taking the address for simple enough types.
+ if Componentgen(n, ns) {
+ return
+ }
+
+ if w == 0 {
+ // evaluate side effects only
+ var nodr Node
+ Regalloc(&nodr, Types[Tptr], nil)
+ Agen(ns, &nodr)
+ Agen(n, &nodr)
+ Regfree(&nodr)
+ return
+ }
+
+ // offset on the stack
+ osrc := stkof(n)
+ odst := stkof(ns)
+
+ if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
+ // osrc and odst both on stack, and at least one is in
+ // an unknown position. Could generate code to test
+ // for forward/backward copy, but instead just copy
+ // to a temporary location first.
+ var tmp Node
+ Tempname(&tmp, n.Type)
+ sgen(n, &tmp, w)
+ sgen(&tmp, ns, w)
+ return
+ }
+
+ Thearch.Stackcopy(n, ns, osrc, odst, w)
+}
+
+/*
+ * generate:
+ * call f
+ * proc=-1 normal call but no return
+ * proc=0 normal call
+ * proc=1 goroutine run in new proc
+ * proc=2 defer call save away stack
+ * proc=3 normal call to C pointer (not Go func value)
+*/
+func Ginscall(f *Node, proc int) {
+ if f.Type != nil {
+ extra := int32(0)
+ if proc == 1 || proc == 2 {
+ extra = 2 * int32(Widthptr)
+ }
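+ // go and defer calls pass two extra words (the argument size and
+ // the FuncVal pointer) ahead of the ordinary arguments, so grow
+ // the outgoing-argument area to match.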
+ Setmaxarg(f.Type, extra)
+ }
+
+ switch proc {
+ default:
+ Fatal("Ginscall: bad proc %d", proc)
+
+ case 0, // normal call
+ -1: // normal call but no return
+ if f.Op == ONAME && f.Class == PFUNC {
+ if f == Deferreturn {
+ // Deferred calls will appear to be returning to
+ // the CALL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction byte before the return PC.
+ // To avoid that being an unrelated instruction,
+ // insert an actual hardware NOP that will have the right line number.
+ // This is different from obj.ANOP, which is a virtual no-op
+ // that doesn't make it into the instruction stream.
+ Thearch.Ginsnop()
+ }
+
+ p := Thearch.Gins(obj.ACALL, nil, f)
+ Afunclit(&p.To, f)
+ if proc == -1 || Noreturn(p) {
+ Thearch.Gins(obj.AUNDEF, nil, nil)
+ }
+ break
+ }
+
+ var reg Node
+ Nodreg(&reg, Types[Tptr], Thearch.REGCTXT)
+ var r1 Node
+ Nodreg(&r1, Types[Tptr], Thearch.REGCALLX)
+ Thearch.Gmove(f, &reg)
+ reg.Op = OINDREG
+ Thearch.Gmove(&reg, &r1)
+ reg.Op = OREGISTER
+ Thearch.Gins(obj.ACALL, &reg, &r1)
+
+ case 3: // normal call of c function pointer
+ Thearch.Gins(obj.ACALL, nil, f)
+
+ case 1, // call in new proc (go)
+ 2: // deferred call (defer)
+ var stk Node
+
+ // size of arguments at 0(SP)
+ stk.Op = OINDREG
+ stk.Val.U.Reg = int16(Thearch.REGSP)
+ stk.Xoffset = 0
+ if HasLinkRegister() {
+ stk.Xoffset += int64(Ctxt.Arch.Ptrsize)
+ }
+ Thearch.Ginscon(Thearch.Optoas(OAS, Types[Tptr]), int64(Argsize(f.Type)), &stk)
+
+ // FuncVal* at 8(SP)
+ stk.Xoffset = int64(Widthptr)
+ if HasLinkRegister() {
+ stk.Xoffset += int64(Ctxt.Arch.Ptrsize)
+ }
+
+ var reg Node
+ Nodreg(&reg, Types[Tptr], Thearch.REGCALLX2)
+ Thearch.Gmove(f, &reg)
+ Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &reg, &stk)
+
+ if proc == 1 {
+ Ginscall(Newproc, 0)
+ } else {
+ if Hasdefer == 0 {
+ Fatal("hasdefer=0 but has defer")
+ }
+ Ginscall(Deferproc, 0)
+ }
+
+ if proc == 2 {
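+ // deferproc returns 0 on the ordinary path and nonzero when the
+ // frame is resumed after a recover, in which case this function
+ // must return immediately; test the result register and emit that
+ // early return.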
+ Nodreg(&reg, Types[TINT32], Thearch.REGRETURN)
+ Thearch.Gins(Thearch.Optoas(OCMP, Types[TINT32]), &reg, Nodintconst(0))
+ p := Gbranch(Thearch.Optoas(OEQ, Types[TINT32]), nil, +1)
+ cgen_ret(nil)
+ Patch(p, Pc)
+ }
+ }
+}
+
+/*
+ * n is call to interface method.
+ * generate res = n.
+ */
+func cgen_callinter(n *Node, res *Node, proc int) {
+ i := n.Left
+ if i.Op != ODOTINTER {
+ Fatal("cgen_callinter: not ODOTINTER %v", Oconv(int(i.Op), 0))
+ }
+
+ f := i.Right // field
+ if f.Op != ONAME {
+ Fatal("cgen_callinter: not ONAME %v", Oconv(int(f.Op), 0))
+ }
+
+ i = i.Left // interface
+
+ if i.Addable == 0 {
+ var tmpi Node
+ Tempname(&tmpi, i.Type)
+ Cgen(i, &tmpi)
+ i = &tmpi
+ }
+
+ Genlist(n.List) // assign the args
+
+ // i is now addable, prepare an indirected
+ // register to hold its address.
+ var nodi Node
+ Igen(i, &nodi, res) // REG = &inter
+
+ var nodsp Node
+ Nodindreg(&nodsp, Types[Tptr], Thearch.REGSP)
+ nodsp.Xoffset = 0
+ if HasLinkRegister() {
+ nodsp.Xoffset += int64(Ctxt.Arch.Ptrsize)
+ }
+ if proc != 0 {
+ nodsp.Xoffset += 2 * int64(Widthptr) // leave room for size & fn
+ }
+ nodi.Type = Types[Tptr]
+ nodi.Xoffset += int64(Widthptr)
+ Cgen(&nodi, &nodsp) // {0, 8(nacl), or 16}(SP) = 8(REG) -- i.data
+
+ var nodo Node
+ Regalloc(&nodo, Types[Tptr], res)
+
+ nodi.Type = Types[Tptr]
+ nodi.Xoffset -= int64(Widthptr)
+ Cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
+ Regfree(&nodi)
+
+ var nodr Node
+ Regalloc(&nodr, Types[Tptr], &nodo)
+ if n.Left.Xoffset == BADWIDTH {
+ Fatal("cgen_callinter: badwidth")
+ }
+ Cgen_checknil(&nodo) // in case offset is huge
+ nodo.Op = OINDREG
+ nodo.Xoffset = n.Left.Xoffset + 3*int64(Widthptr) + 8
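+ // 3*Widthptr+8 skips the itab header, three pointers plus two
+ // int32 fields in the runtime's itab of this era, to land on the
+ // fun array holding the method pointers.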
+ if proc == 0 {
+ // plain call: use direct c function pointer - more efficient
+ Cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
+ proc = 3
+ } else {
+ // go/defer. generate go func value.
+ Agen(&nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
+ }
+
+ nodr.Type = n.Left.Type
+ Ginscall(&nodr, proc)
+
+ Regfree(&nodr)
+ Regfree(&nodo)
+}
+
+/*
+ * generate function call;
+ * proc=0 normal call
+ * proc=1 goroutine run in new proc
+ * proc=2 defer call save away stack
+ */
+func cgen_call(n *Node, proc int) {
+ if n == nil {
+ return
+ }
+
+ var afun Node
+ if n.Left.Ullman >= UINF {
+ // if name involves a fn call
+ // precompute the address of the fn
+ Tempname(&afun, Types[Tptr])
+
+ Cgen(n.Left, &afun)
+ }
+
+ Genlist(n.List) // assign the args
+ t := n.Left.Type
+
+ // call tempname pointer
+ if n.Left.Ullman >= UINF {
+ var nod Node
+ Regalloc(&nod, Types[Tptr], nil)
+ Cgen_as(&nod, &afun)
+ nod.Type = t
+ Ginscall(&nod, proc)
+ Regfree(&nod)
+ return
+ }
+
+ // call pointer
+ if n.Left.Op != ONAME || n.Left.Class != PFUNC {
+ var nod Node
+ Regalloc(&nod, Types[Tptr], nil)
+ Cgen_as(&nod, n.Left)
+ nod.Type = t
+ Ginscall(&nod, proc)
+ Regfree(&nod)
+ return
+ }
+
+ // call direct
+ n.Left.Method = 1
+
+ Ginscall(n.Left, proc)
+}
+
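+// HasLinkRegister reports whether the target saves the return PC in
+// a link register rather than on the stack; everything except amd64
+// ('6') and 386 ('8') does, and such targets see all SP-relative
+// offsets shifted by one word.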
+func HasLinkRegister() bool {
+ c := Ctxt.Arch.Thechar
+ return c != '6' && c != '8'
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ * res = return value from call.
+ */
+func cgen_callret(n *Node, res *Node) {
+ t := n.Left.Type
+ if t.Etype == TPTR32 || t.Etype == TPTR64 {
+ t = t.Type
+ }
+
+ var flist Iter
+ fp := Structfirst(&flist, Getoutarg(t))
+ if fp == nil {
+ Fatal("cgen_callret: nil")
+ }
+
+ var nod Node
+ nod.Op = OINDREG
+ nod.Val.U.Reg = int16(Thearch.REGSP)
+ nod.Addable = 1
+
+ nod.Xoffset = fp.Width
+ if HasLinkRegister() {
+ nod.Xoffset += int64(Ctxt.Arch.Ptrsize)
+ }
+ nod.Type = fp.Type
+ Cgen_as(res, &nod)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ * res = &return value from call.
+ */
+func cgen_aret(n *Node, res *Node) {
+ t := n.Left.Type
+ if Isptr[t.Etype] {
+ t = t.Type
+ }
+
+ var flist Iter
+ fp := Structfirst(&flist, Getoutarg(t))
+ if fp == nil {
+ Fatal("cgen_aret: nil")
+ }
+
+ var nod1 Node
+ nod1.Op = OINDREG
+ nod1.Val.U.Reg = int16(Thearch.REGSP)
+ nod1.Addable = 1
+ nod1.Xoffset = fp.Width
+ if HasLinkRegister() {
+ nod1.Xoffset += int64(Ctxt.Arch.Ptrsize)
+ }
+ nod1.Type = fp.Type
+
+ if res.Op != OREGISTER {
+ var nod2 Node
+ Regalloc(&nod2, Types[Tptr], res)
+ Agen(&nod1, &nod2)
+ Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &nod2, res)
+ Regfree(&nod2)
+ } else {
+ Agen(&nod1, res)
+ }
+}
+
+/*
+ * generate return.
+ * n->left is assignments to return values.
+ */
+func cgen_ret(n *Node) {
+ if n != nil {
+ Genlist(n.List) // copy out args
+ }
+ if Hasdefer != 0 {
+ Ginscall(Deferreturn, 0)
+ }
+ Genlist(Curfn.Exit)
+ p := Thearch.Gins(obj.ARET, nil, nil)
+ if n != nil && n.Op == ORETJMP {
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = Linksym(n.Left.Sym)
+ }
+}
+
+/*
+ * generate division according to op, one of:
+ * res = nl / nr
+ * res = nl % nr
+ */
+func cgen_div(op int, nl *Node, nr *Node, res *Node) {
+ var w int
+
+ // TODO(rsc): arm64 needs to support the relevant instructions
+ // in peep and optoas in order to enable this.
+ // TODO(rsc): ppc64 needs to support the relevant instructions
+ // in peep and optoas in order to enable this.
+ if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+ goto longdiv
+ }
+ w = int(nl.Type.Width * 8)
+
+ // Front end handled 32-bit division. We only need to handle 64-bit.
+ // try to do division by multiply by (2^w)/d
+ // see hacker's delight chapter 10
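+ // For example, x/5 becomes a high multiply (Cgen_hmul) by a
+ // precomputed constant followed by shifts: Umagic/Smagic yield the
+ // multiplier (Um/Sm), the shift count S, whether an
+ // overflow-correcting add is needed (Ua), and Bad when no usable
+ // constant exists.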
+ switch Simtype[nl.Type.Etype] {
+ default:
+ goto longdiv
+
+ case TUINT64:
+ var m Magic
+ m.W = w
+ m.Ud = uint64(Mpgetfix(nr.Val.U.Xval))
+ Umagic(&m)
+ if m.Bad != 0 {
+ break
+ }
+ if op == OMOD {
+ goto longmod
+ }
+
+ var n1 Node
+ Cgenr(nl, &n1, nil)
+ var n2 Node
+ Nodconst(&n2, nl.Type, int64(m.Um))
+ var n3 Node
+ Regalloc(&n3, nl.Type, res)
+ Thearch.Cgen_hmul(&n1, &n2, &n3)
+
+ if m.Ua != 0 {
+ // need to add numerator accounting for overflow
+ Thearch.Gins(Thearch.Optoas(OADD, nl.Type), &n1, &n3)
+
+ Nodconst(&n2, nl.Type, 1)
+ Thearch.Gins(Thearch.Optoas(ORROTC, nl.Type), &n2, &n3)
+ Nodconst(&n2, nl.Type, int64(m.S)-1)
+ Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3)
+ } else {
+ Nodconst(&n2, nl.Type, int64(m.S))
+ Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3) // shift dx
+ }
+
+ Thearch.Gmove(&n3, res)
+ Regfree(&n1)
+ Regfree(&n3)
+ return
+
+ case TINT64:
+ var m Magic
+ m.W = w
+ m.Sd = Mpgetfix(nr.Val.U.Xval)
+ Smagic(&m)
+ if m.Bad != 0 {
+ break
+ }
+ if op == OMOD {
+ goto longmod
+ }
+
+ var n1 Node
+ Cgenr(nl, &n1, res)
+ var n2 Node
+ Nodconst(&n2, nl.Type, m.Sm)
+ var n3 Node
+ Regalloc(&n3, nl.Type, nil)
+ Thearch.Cgen_hmul(&n1, &n2, &n3)
+
+ if m.Sm < 0 {
+ // need to add numerator
+ Thearch.Gins(Thearch.Optoas(OADD, nl.Type), &n1, &n3)
+ }
+
+ Nodconst(&n2, nl.Type, int64(m.S))
+ Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3) // shift n3
+
+ Nodconst(&n2, nl.Type, int64(w)-1)
+
+ Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
+ Thearch.Gins(Thearch.Optoas(OSUB, nl.Type), &n1, &n3) // added
+
+ if m.Sd < 0 {
+ // this could probably be removed
+ // by factoring it into the multiplier
+ Thearch.Gins(Thearch.Optoas(OMINUS, nl.Type), nil, &n3)
+ }
+
+ Thearch.Gmove(&n3, res)
+ Regfree(&n1)
+ Regfree(&n3)
+ return
+ }
+
+ goto longdiv
+
+ // division and mod using (slow) hardware instruction
+longdiv:
+ Thearch.Dodiv(op, nl, nr, res)
+
+ return
+
+ // mod using formula A%B = A-(A/B*B) but
+ // we know that there is a fast algorithm for A/B
+longmod:
+ var n1 Node
+ Regalloc(&n1, nl.Type, res)
+
+ Cgen(nl, &n1)
+ var n2 Node
+ Regalloc(&n2, nl.Type, nil)
+ cgen_div(ODIV, &n1, nr, &n2)
+ a := Thearch.Optoas(OMUL, nl.Type)
+ if w == 8 {
+ // use 2-operand 16-bit multiply
+ // because there is no 2-operand 8-bit multiply
+ a = Thearch.Optoas(OMUL, Types[TINT16]) // XXX was IMULW
+ }
+
+ if !Smallintconst(nr) {
+ var n3 Node
+ Regalloc(&n3, nl.Type, nil)
+ Cgen(nr, &n3)
+ Thearch.Gins(a, &n3, &n2)
+ Regfree(&n3)
+ } else {
+ Thearch.Gins(a, nr, &n2)
+ }
+ Thearch.Gins(Thearch.Optoas(OSUB, nl.Type), &n2, &n1)
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ Regfree(&n2)
+}
+
+func Fixlargeoffset(n *Node) {
+ if n == nil {
+ return
+ }
+ if n.Op != OINDREG {
+ return
+ }
+ if n.Val.U.Reg == int16(Thearch.REGSP) { // stack offset cannot be large
+ return
+ }
+ if n.Xoffset != int64(int32(n.Xoffset)) {
+ // offset too large, add to register instead.
+ a := *n
+
+ a.Op = OREGISTER
+ a.Type = Types[Tptr]
+ a.Xoffset = 0
+ Cgen_checknil(&a)
+ Thearch.Ginscon(Thearch.Optoas(OADD, Types[Tptr]), n.Xoffset, &a)
+ n.Xoffset = 0
+ }
+}
if nr != nil {
if nl.Ullman > nr.Ullman && nl.Addable == 0 {
Tempname(&tnl, nl.Type)
- Thearch.Cgen(nl, &tnl)
+ Cgen(nl, &tnl)
nl = &tnl
}
if nr.Addable == 0 {
var tnr Node
Tempname(&tnr, nr.Type)
- Thearch.Cgen(nr, &tnr)
+ Cgen(nr, &tnr)
nr = &tnr
}
}
if nl.Addable == 0 {
Tempname(&tnl, nl.Type)
- Thearch.Cgen(nl, &tnl)
+ Cgen(nl, &tnl)
nl = &tnl
}
true_ = !true_
}
- Thearch.Bgen(&na, true_, likely, to)
+ Bgen(&na, true_, likely, to)
}
// break addable nc-complex into nr-real and ni-imaginary
ra.Op = OMINUS
ra.Left = nl
ra.Type = nl.Type
- Thearch.Cgen(&ra, res)
+ Cgen(&ra, res)
}
// build and execute tree
ra.Left = &n1
ra.Right = &n3
ra.Type = n1.Type
- Thearch.Cgen(&ra, &n5)
+ Cgen(&ra, &n5)
ra = Node{}
ra.Op = uint8(op)
ra.Left = &n2
ra.Right = &n4
ra.Type = n2.Type
- Thearch.Cgen(&ra, &n6)
+ Cgen(&ra, &n6)
}
// build and execute tree
ra.Left = &rm1
ra.Right = &rm2
ra.Type = rm1.Type
- Thearch.Cgen(&ra, &tmp)
+ Cgen(&ra, &tmp)
// imag part
rm1 = Node{}
ra.Left = &rm1
ra.Right = &rm2
ra.Type = rm1.Type
- Thearch.Cgen(&ra, &n6)
+ Cgen(&ra, &n6)
// tmp ->real part
- Thearch.Cgen(&tmp, &n5)
+ Cgen(&tmp, &n5)
}
func nodfconst(n *Node, t *Type, fval *Mpflt) {
var n3 Node
subnode(&n3, &n4, t)
- Thearch.Cgen(&n1, &n3)
- Thearch.Cgen(&n2, &n4)
+ Cgen(&n1, &n3)
+ Cgen(&n2, &n4)
}
}
subnode(&n1, &n2, res)
var tmp Node
Tempname(&tmp, n1.Type)
- Thearch.Cgen(n.Left, &tmp)
- Thearch.Cgen(n.Right, &n2)
- Thearch.Cgen(&tmp, &n1)
+ Cgen(n.Left, &tmp)
+ Cgen(n.Right, &n2)
+ Cgen(&tmp, &n1)
return
}
var n2 Node
subnode(&n1, &n2, nl)
if n.Op == OREAL {
- Thearch.Cgen(&n1, res)
+ Cgen(&n1, res)
return
}
- Thearch.Cgen(&n2, res)
+ Cgen(&n2, res)
return
}
if res.Addable == 0 {
var n1 Node
- Thearch.Igen(res, &n1, nil)
- Thearch.Cgen(n, &n1)
- Thearch.Regfree(&n1)
+ Igen(res, &n1, nil)
+ Cgen(n, &n1)
+ Regfree(&n1)
return
}
OCALLMETH,
OCALLINTER:
var n1 Node
- Thearch.Igen(n, &n1, res)
+ Igen(n, &n1, res)
Complexmove(&n1, res)
- Thearch.Regfree(&n1)
+ Regfree(&n1)
return
case OCONV,
if nr != nil {
if nl.Ullman > nr.Ullman && nl.Addable == 0 {
Tempname(&tnl, nl.Type)
- Thearch.Cgen(nl, &tnl)
+ Cgen(nl, &tnl)
nl = &tnl
}
if nr.Addable == 0 {
var tnr Node
Tempname(&tnr, nr.Type)
- Thearch.Cgen(nr, &tnr)
+ Cgen(nr, &tnr)
nr = &tnr
}
}
if nl.Addable == 0 {
Tempname(&tnl, nl.Type)
- Thearch.Cgen(nl, &tnl)
+ Cgen(nl, &tnl)
nl = &tnl
}
Fatal("cgen_proc: unknown call %v", Oconv(int(n.Left.Op), 0))
case OCALLMETH:
- Cgen_callmeth(n.Left, proc)
+ cgen_callmeth(n.Left, proc)
case OCALLINTER:
- Thearch.Cgen_callinter(n.Left, nil, proc)
+ cgen_callinter(n.Left, nil, proc)
case OCALLFUNC:
- Thearch.Cgen_call(n.Left, proc)
+ cgen_call(n.Left, proc)
}
}
}
ullmancalc(&z)
- Thearch.Cgen(&z, n)
+ Cgen(&z, n)
}
/*
*/
tmp := temp(Types[Tptr])
- Thearch.Cgen(n.Right, tmp)
+ Cgen(n.Right, tmp)
Gvardef(res)
dst := *res
dst.Type = Types[Tptr]
dst.Xoffset += int64(Widthptr)
- Thearch.Cgen(tmp, &dst)
+ Cgen(tmp, &dst)
dst.Xoffset -= int64(Widthptr)
- Thearch.Cgen(n.Left, &dst)
+ Cgen(n.Left, &dst)
}
/*
var src Node
if isnil(n.Left) {
Tempname(&src, n.Left.Type)
- Thearch.Cgen(n.Left, &src)
+ Cgen(n.Left, &src)
} else {
src = *n.Left
}
if !Isptr[n.Left.Type.Etype] {
Fatal("slicearr is supposed to work on pointer: %v\n", Nconv(n, obj.FmtSign))
}
- Thearch.Cgen(&src, base)
+ Cgen(&src, base)
Cgen_checknil(base)
} else {
src.Type = Types[Tptr]
- Thearch.Cgen(&src, base)
+ Cgen(&src, base)
}
// committed to the update
// compute len and cap.
// len = n-i, cap = m-i, and offs = i*width.
// computing offs last lets the multiply overwrite i.
- Thearch.Cgen((*Node)(len), tmplen)
+ Cgen((*Node)(len), tmplen)
if n.Op != OSLICESTR {
- Thearch.Cgen(cap, tmpcap)
+ Cgen(cap, tmpcap)
}
// if new cap != 0 { base += add }
Nodconst(&con, tmpcap.Type, 0)
cmp := Nod(OEQ, tmpcap, &con)
typecheck(&cmp, Erv)
- Thearch.Bgen(cmp, true, -1, p2)
+ Bgen(cmp, true, -1, p2)
add := Nod(OADD, base, offs)
typecheck(&add, Erv)
- Thearch.Cgen(add, base)
+ Cgen(add, base)
Patch(p2, Pc)
}
dst.Xoffset += int64(Array_array)
dst.Type = Types[Tptr]
- Thearch.Cgen(base, &dst)
+ Cgen(base, &dst)
// dst.len = hi [ - lo ]
dst = *res
dst.Xoffset += int64(Array_nel)
dst.Type = Types[Simtype[TUINT]]
- Thearch.Cgen(tmplen, &dst)
+ Cgen(tmplen, &dst)
if n.Op != OSLICESTR {
// dst.cap = cap [ - lo ]
dst.Xoffset += int64(Array_cap)
dst.Type = Types[Simtype[TUINT]]
- Thearch.Cgen(tmpcap, &dst)
+ Cgen(tmpcap, &dst)
}
}
lno := setlineno(n)
- wasregalloc := Thearch.Anyregalloc()
+ wasregalloc := Anyregalloc()
if n == nil {
goto ret
lab.Continpc = continpc
}
- gen(n.Nincr) // contin: incr
- Patch(p1, Pc) // test:
- Thearch.Bgen(n.Ntest, false, -1, breakpc) // if(!test) goto break
- Genlist(n.Nbody) // body
+ gen(n.Nincr) // contin: incr
+ Patch(p1, Pc) // test:
+ Bgen(n.Ntest, false, -1, breakpc) // if(!test) goto break
+ Genlist(n.Nbody) // body
gjmp(continpc)
Patch(breakpc, Pc) // done:
continpc = scontin
}
case OIF:
- p1 := gjmp(nil) // goto test
- p2 := gjmp(nil) // p2: goto else
- Patch(p1, Pc) // test:
- Thearch.Bgen(n.Ntest, false, int(-n.Likely), p2) // if(!test) goto p2
- Genlist(n.Nbody) // then
- p3 := gjmp(nil) // goto done
- Patch(p2, Pc) // else:
- Genlist(n.Nelse) // else
- Patch(p3, Pc) // done:
+ p1 := gjmp(nil) // goto test
+ p2 := gjmp(nil) // p2: goto else
+ Patch(p1, Pc) // test:
+ Bgen(n.Ntest, false, int(-n.Likely), p2) // if(!test) goto p2
+ Genlist(n.Nbody) // then
+ p3 := gjmp(nil) // goto done
+ Patch(p2, Pc) // else:
+ Genlist(n.Nelse) // else
+ Patch(p3, Pc) // done:
case OSWITCH:
sbreak := breakpc
Cgen_as(n.Left, n.Right)
case OCALLMETH:
- Cgen_callmeth(n, 0)
+ cgen_callmeth(n, 0)
case OCALLINTER:
- Thearch.Cgen_callinter(n, nil, 0)
+ cgen_callinter(n, nil, 0)
case OCALLFUNC:
- Thearch.Cgen_call(n, 0)
+ cgen_call(n, 0)
case OPROC:
cgen_proc(n, 1)
case ORETURN,
ORETJMP:
- Thearch.Cgen_ret(n)
+ cgen_ret(n)
case OCHECKNIL:
Cgen_checknil(n.Left)
}
ret:
- if Thearch.Anyregalloc() != wasregalloc {
+ if Anyregalloc() != wasregalloc {
Dump("node", n)
Fatal("registers left allocated")
}
return
}
- Thearch.Cgen(nr, nl)
+ Cgen(nr, nl)
}
-func Cgen_callmeth(n *Node, proc int) {
+func cgen_callmeth(n *Node, proc int) {
// generate a rewrite in n2 for the method call
// (p.f)(...) goes to (f)(p,...)
if n2.Left.Op == ONAME {
n2.Left.Class = PFUNC
}
- Thearch.Cgen_call(&n2, proc)
+ cgen_call(&n2, proc)
}
func checklabels() {
if nr != nil && !cadable(nr) {
goto no
}
- Thearch.Igen(nl, &nodl, nil)
+ Igen(nl, &nodl, nil)
freel = 1
}
if nr != nil {
nodr = *nr
if !cadable(nr) {
- Thearch.Igen(nr, &nodr, nil)
+ Igen(nr, &nodr, nil)
freer = 1
}
} else {
var tmp Node
Nodconst(&tmp, nl.Type, 0)
- Thearch.Regalloc(&nodr, Types[TUINT], nil)
+ Regalloc(&nodr, Types[TUINT], nil)
Thearch.Gmove(&tmp, &nodr)
freer = 1
}
no:
if freer != 0 {
- Thearch.Regfree(&nodr)
+ Regfree(&nodr)
}
if freel != 0 {
- Thearch.Regfree(&nodl)
+ Regfree(&nodl)
}
return false
yes:
if freer != 0 {
- Thearch.Regfree(&nodr)
+ Regfree(&nodr)
}
if freel != 0 {
- Thearch.Regfree(&nodl)
+ Regfree(&nodl)
}
return true
}
)
type Arch struct {
- Thechar int
- Thestring string
- Thelinkarch *obj.LinkArch
- Typedefs []Typedef
- REGSP int
- REGCTXT int
- MAXWIDTH int64
- Anyregalloc func() bool
- Betypeinit func()
- Bgen func(*Node, bool, int, *obj.Prog)
- Cgen func(*Node, *Node)
- Cgen_call func(*Node, int)
- Cgen_callinter func(*Node, *Node, int)
- Cgen_ret func(*Node)
- Clearfat func(*Node)
- Defframe func(*obj.Prog)
- Excise func(*Flow)
- Expandchecks func(*obj.Prog)
- Gclean func()
- Ginit func()
- Gins func(int, *Node, *Node) *obj.Prog
- Ginscall func(*Node, int)
- Gmove func(*Node, *Node)
- Igen func(*Node, *Node, *Node)
- Linkarchinit func()
- Peep func(*obj.Prog)
- Proginfo func(*obj.Prog) // fills in Prog.Info
- Regalloc func(*Node, *Type, *Node)
- Regfree func(*Node)
- Regtyp func(*obj.Addr) bool
- Sameaddr func(*obj.Addr, *obj.Addr) bool
- Smallindir func(*obj.Addr, *obj.Addr) bool
- Stackaddr func(*obj.Addr) bool
- Excludedregs func() uint64
- RtoB func(int) uint64
- FtoB func(int) uint64
- BtoR func(uint64) int
- BtoF func(uint64) int
- Optoas func(int, *Type) int
- Doregbits func(int) uint64
- Regnames func(*int) []string
+ Thechar int
+ Thestring string
+ Thelinkarch *obj.LinkArch
+ Typedefs []Typedef
+ REGSP int
+ REGCTXT int
+ REGCALLX int // BX
+ REGCALLX2 int // AX
+ REGRETURN int // AX
+ REGMIN int
+ REGMAX int
+ FREGMIN int
+ FREGMAX int
+ MAXWIDTH int64
+ ReservedRegs []int
+
+ AddIndex func(*Node, int64, *Node) bool // optional
+ Betypeinit func()
+ Bgen_float func(*Node, int, int, *obj.Prog) // optional
+ Cgen64 func(*Node, *Node) // only on 32-bit systems
+ Cgenindex func(*Node, *Node, bool) *obj.Prog
+ Cgen_bmul func(int, *Node, *Node, *Node) bool
+ Cgen_float func(*Node, *Node) // optional
+ Cgen_hmul func(*Node, *Node, *Node)
+ Cgen_shift func(int, bool, *Node, *Node, *Node)
+ Clearfat func(*Node)
+ Cmp64 func(*Node, *Node, int, int, *obj.Prog) // only on 32-bit systems
+ Defframe func(*obj.Prog)
+ Dodiv func(int, *Node, *Node, *Node)
+ Excise func(*Flow)
+ Expandchecks func(*obj.Prog)
+ Gins func(int, *Node, *Node) *obj.Prog
+ Ginscon func(int, int64, *Node)
+ Ginsnop func()
+ Gmove func(*Node, *Node)
+ Igenindex func(*Node, *Node, bool) *obj.Prog
+ Linkarchinit func()
+ Peep func(*obj.Prog)
+ Proginfo func(*obj.Prog) // fills in Prog.Info
+ Regtyp func(*obj.Addr) bool
+ Sameaddr func(*obj.Addr, *obj.Addr) bool
+ Smallindir func(*obj.Addr, *obj.Addr) bool
+ Stackaddr func(*obj.Addr) bool
+ Stackcopy func(*Node, *Node, int64, int64, int64)
+ Sudoaddable func(int, *Node, *obj.Addr) bool
+ Sudoclean func()
+ Excludedregs func() uint64
+ RtoB func(int) uint64
+ FtoB func(int) uint64
+ BtoR func(uint64) int
+ BtoF func(uint64) int
+ Optoas func(int, *Type) int
+ Doregbits func(int) uint64
+ Regnames func(*int) []string
}
var pcloc int32
package gc
-import "cmd/internal/obj"
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "runtime"
+ "strings"
+)
var ddumped int
switch n.Op {
default:
a := a // copy to let escape into Ctxt.Dconv
+ Debug['h'] = 1
+ Dump("naddr", n)
Fatal("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
case OREGISTER:
n.Op = OINDREG
n.Val.U.Reg = int16(Thearch.REGSP)
- if Thearch.Thechar == '5' {
- n.Xoffset += 4
- }
- if Thearch.Thechar == '7' || Thearch.Thechar == '9' {
- n.Xoffset += 8
+ if HasLinkRegister() {
+ n.Xoffset += int64(Ctxt.Arch.Ptrsize)
}
case 1: // input arg
case 2: // offset output arg
Fatal("shouldn't be used")
-
- n.Op = OINDREG
- n.Val.U.Reg = int16(Thearch.REGSP)
- n.Xoffset += Types[Tptr].Width
}
n.Typecheck = 1
p.To.Offset = 0
return q
}
+
+var reg [100]int // reference count for each register
+var regstk [100][]byte // allocation sites, recorded when -v is given
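+// Integer and floating-point registers share this array; both are
+// indexed by r-REGMIN (not r-FREGMIN), so the two ranges land in
+// disjoint slots, which is why the float loops below subtract REGMIN.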
+
+func ginit() {
+ for r := range reg {
+ reg[r] = 1
+ }
+
+ for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
+ reg[r-Thearch.REGMIN] = 0
+ }
+ for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
+ reg[r-Thearch.REGMIN] = 0
+ }
+
+ for _, r := range Thearch.ReservedRegs {
+ reg[r-Thearch.REGMIN] = 1
+ }
+}
+
+func gclean() {
+ for _, r := range Thearch.ReservedRegs {
+ reg[r-Thearch.REGMIN]--
+ }
+
+ for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
+ n := reg[r-Thearch.REGMIN]
+ if n != 0 {
+ Yyerror("reg %v left allocated", obj.Rconv(r))
+ if Debug['v'] != 0 {
+ Regdump()
+ }
+ }
+ }
+
+ for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
+ n := reg[r-Thearch.REGMIN]
+ if n != 0 {
+ Yyerror("reg %v left allocated", obj.Rconv(r))
+ if Debug['v'] != 0 {
+ Regdump()
+ }
+ }
+ }
+}
+
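+// Anyregalloc reports whether any register beyond the permanently
+// reserved set is currently allocated; gen uses it to detect
+// registers leaked across statement boundaries.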
+func Anyregalloc() bool {
+	n := 0
+	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
+		if reg[r-Thearch.REGMIN] != 0 { // in use
+			n++
+		}
+	}
+	// The reserved registers are always marked in use, so any count
+	// beyond them means a register is still allocated.
+	return n > len(Thearch.ReservedRegs)
+}
+
+// Regalloc allocates a register of type t, leaving the result in n.
+// If o != nil, o may be a register that can be reused.
+// The caller must Regfree(n) when finished with it.
+func Regalloc(n *Node, t *Type, o *Node) {
+ if t == nil {
+ Fatal("regalloc: t nil")
+ }
+ et := int(Simtype[t.Etype])
+ if Ctxt.Arch.Regsize == 4 && (et == TINT64 || et == TUINT64) {
+ Fatal("regalloc 64bit")
+ }
+
+ var i int
+Switch:
+ switch et {
+ default:
+ Fatal("regalloc: unknown type %v", Tconv(t, 0))
+
+ case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TPTR32, TPTR64, TBOOL:
+ if o != nil && o.Op == OREGISTER {
+ i = int(o.Val.U.Reg)
+ if Thearch.REGMIN <= i && i <= Thearch.REGMAX {
+ break Switch
+ }
+ }
+ for i = Thearch.REGMIN; i <= Thearch.REGMAX; i++ {
+ if reg[i-Thearch.REGMIN] == 0 {
+ break Switch
+ }
+ }
+ Flusherrors()
+ Regdump()
+ Fatal("out of fixed registers")
+
+ case TFLOAT32, TFLOAT64:
+ if o != nil && o.Op == OREGISTER {
+ i = int(o.Val.U.Reg)
+ if Thearch.FREGMIN <= i && i <= Thearch.FREGMAX {
+ break Switch
+ }
+ }
+ for i = Thearch.FREGMIN; i <= Thearch.FREGMAX; i++ {
+ if reg[i-Thearch.REGMIN] == 0 { // note: REGMIN, not FREGMIN
+ break Switch
+ }
+ }
+ Flusherrors()
+ Regdump()
+ Fatal("out of floating registers")
+
+ case TCOMPLEX64, TCOMPLEX128:
+ Tempname(n, t)
+ return
+ }
+
+ ix := i - Thearch.REGMIN
+ if reg[ix] == 0 && Debug['v'] > 0 {
+ if regstk[ix] == nil {
+ regstk[ix] = make([]byte, 4096)
+ }
+ stk := regstk[ix]
+ n := runtime.Stack(stk[:cap(stk)], false)
+ regstk[ix] = stk[:n]
+ }
+ reg[ix]++
+ Nodreg(n, t, i)
+}
+
+func Regfree(n *Node) {
+ if n.Op == ONAME {
+ return
+ }
+ if n.Op != OREGISTER && n.Op != OINDREG {
+ Fatal("regfree: not a register")
+ }
+ i := int(n.Val.U.Reg)
+ if i == Thearch.REGSP {
+ return
+ }
+ switch {
+ case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
+ Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
+ // ok
+ default:
+ Fatal("regfree: reg out of range")
+ }
+
+ i -= Thearch.REGMIN
+ if reg[i] <= 0 {
+ Fatal("regfree: reg not allocated")
+ }
+ reg[i]--
+ if reg[i] == 0 {
+ regstk[i] = regstk[i][:0]
+ }
+}
+
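Registers are reference-counted rather than scope-bound, so the portable code brackets scratch registers explicitly. A minimal sketch of the usual pairing, using a hypothetical helper (assuming the package gc context and the Thearch.Gmove hook):

	// moveViaScratch is illustrative only: allocate, use, release.
	// A missing Regfree is reported by gclean when compilation of the
	// current function ends.
	func moveViaScratch(src, dst *Node) {
		var r Node
		Regalloc(&r, Types[Tptr], nil) // scratch pointer-sized register
		Thearch.Gmove(src, &r)         // load source into the register
		Thearch.Gmove(&r, dst)         // store it to the destination
		Regfree(&r)
	}
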
+// Reginuse reports whether r is in use.
+func Reginuse(r int) bool {
+ switch {
+ case Thearch.REGMIN <= r && r <= Thearch.REGMAX,
+ Thearch.FREGMIN <= r && r <= Thearch.FREGMAX:
+ // ok
+ default:
+ Fatal("reginuse: reg out of range")
+ }
+
+ return reg[r-Thearch.REGMIN] > 0
+}
+
+// Regrealloc(n) undoes the effect of Regfree(n),
+// so that a register can be given up but then reclaimed.
+func Regrealloc(n *Node) {
+ if n.Op != OREGISTER && n.Op != OINDREG {
+ Fatal("regrealloc: not a register")
+ }
+ i := int(n.Val.U.Reg)
+ if i == Thearch.REGSP {
+ return
+ }
+ switch {
+ case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
+ Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
+ // ok
+ default:
+ Fatal("regrealloc: reg out of range")
+ }
+
+ i -= Thearch.REGMIN
+ if reg[i] == 0 && Debug['v'] > 0 {
+ if regstk[i] == nil {
+ regstk[i] = make([]byte, 4096)
+ }
+ stk := regstk[i]
+ n := runtime.Stack(stk[:cap(stk)], false)
+ regstk[i] = stk[:n]
+ }
+ reg[i]++
+}
+
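In practice that means a caller can drop its claim around a stretch of nested code generation and then take the same register back. A hedged sketch of the pattern (hypothetical helper, assuming package gc):

	// withRegisterReleased is illustrative: Regfree drops the claim,
	// nested codegen runs, and Regrealloc restores the claim afterwards.
	func withRegisterReleased(r *Node, nested func()) {
		Regfree(r)
		nested()
		Regrealloc(r)
	}
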
+func Regdump() {
+ if Debug['v'] == 0 {
+ fmt.Printf("run compiler with -v for register allocation sites\n")
+ return
+ }
+
+ dump := func(r int) {
+ stk := regstk[r-Thearch.REGMIN]
+ if len(stk) == 0 {
+ return
+ }
+ fmt.Printf("reg %v allocated at:\n", obj.Rconv(r))
+ fmt.Printf("\t%s\n", strings.Replace(strings.TrimSpace(string(stk)), "\n", "\n\t", -1))
+ }
+
+ for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
+ if reg[r-Thearch.REGMIN] != 0 {
+ dump(r)
+ }
+ }
+ for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
+		if reg[r-Thearch.REGMIN] != 0 {
+ dump(r)
+ }
+ }
+}
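
The -v bookkeeping leans on runtime.Stack writing a backtrace into a caller-provided buffer; Regdump then re-indents it for the report. A self-contained demonstration of that capture-and-indent idiom (standalone program, not compiler code):

	package main

	import (
		"fmt"
		"runtime"
		"strings"
	)

	func main() {
		// Capture this goroutine's stack, as Regalloc does into regstk[ix].
		stk := make([]byte, 4096)
		n := runtime.Stack(stk[:cap(stk)], false)
		stk = stk[:n]

		// Indent every line, as Regdump's "allocated at:" report does.
		fmt.Printf("\t%s\n", strings.Replace(strings.TrimSpace(string(stk)), "\n", "\n\t", -1))
	}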
if ((Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || n.Addable == 0 || n.Op == OLITERAL {
var reg Node
-		Thearch.Regalloc(&reg, Types[Tptr], n)
-		Thearch.Cgen(n, &reg)
+		Regalloc(&reg, Types[Tptr], n)
+		Cgen(n, &reg)
		Thearch.Gins(obj.ACHECKNIL, &reg, nil)
-		Thearch.Regfree(&reg)
+		Regfree(&reg)
return
}
Afunclit(&ptxt.From, Curfn.Nname)
- Thearch.Ginit()
+ ginit()
gcargs = makefuncdatasym("gcargs·%d", obj.FUNCDATA_ArgsPointerMaps)
gclocals = makefuncdatasym("gclocals·%d", obj.FUNCDATA_LocalsPointerMaps)
Genlist(Curfn.Enter)
Genlist(Curfn.Nbody)
- Thearch.Gclean()
+ gclean()
checklabels()
if nerrors != 0 {
goto ret
}
if Curfn.Type.Outtuple != 0 {
- Thearch.Ginscall(throwreturn, 0)
+ Ginscall(throwreturn, 0)
}
- Thearch.Ginit()
+ ginit()
// TODO: Determine when the final cgen_ret can be omitted. Perhaps always?
- Thearch.Cgen_ret(nil)
+ cgen_ret(nil)
if Hasdefer != 0 {
// deferreturn pretends to have one uintptr argument.
}
}
- Thearch.Gclean()
+ gclean()
if nerrors != 0 {
goto ret
}
// Assume that stack variables with address not taken can be loaded multiple times
// from memory without being rechecked. Other variables need to be checked on
// each load.
-type NilVar struct {
-}
var killed int // f.Data is either nil or &killed
}
}
- if Debug['v'] != 0 && strings.Contains(Curfn.Nname.Sym.Name, "Parse") {
+ if false && Debug['v'] != 0 && strings.Contains(Curfn.Nname.Sym.Name, "Parse") {
Warn("regions: %d\n", nregion)
}
if nregion >= MaxRgn {
OLROT // left rotate: AROL.
ORROTC // right rotate-carry: ARCR.
ORETJMP // return to other function
+ OPS // compare parity set (for x86 NaN check)
OEND
)
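
OPS names the x86 "parity set" condition: UCOMISS/UCOMISD report an unordered (NaN) comparison through the parity flag, so floating-point branches need an extra parity jump. A standalone reminder of the semantics that forces this (plain Go, runnable anywhere):

	package main

	import "math"

	func main() {
		nan := math.NaN()
		// IEEE 754: all ordered comparisons involving NaN are false,
		// and x != x is the classic NaN test; on x86 the unordered
		// case is signaled via the parity flag, hence OPS.
		println(nan == nan)        // false
		println(nan != nan)        // true
		println(nan < 1, nan >= 1) // false false
	}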
{ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
{AB, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0},
{ABL, C_NONE, C_NONE, C_REG, 6, 4, 0, 0, 0},
+ {ABL, C_REG, C_NONE, C_REG, 6, 4, 0, 0, 0},
{ABL, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0},
{obj.ARET, C_NONE, C_NONE, C_REG, 6, 4, 0, 0, 0},
{obj.ARET, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0},
os.Exit(2)
}
-func linksetexp() {
+func init() {
for _, f := range strings.Split(goexperiment, ",") {
if f != "" {
addexp(f)
}
func Linknew(arch *LinkArch) *Link {
- linksetexp()
-
ctxt := new(Link)
ctxt.Hash = make(map[SymVer]*LSym)
ctxt.Arch = arch
}
}
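
Switching from an explicitly called linksetexp to a package init leans on Go's guarantee that init functions run before any exported function of the package is used, so Linknew can no longer forget the setup. A toy illustration of that ordering (not liblink code):

	package main

	import "fmt"

	var exps []string

	// init runs before main ever executes, mirroring how the obj
	// package's init now parses goexperiment up front.
	func init() {
		exps = append(exps, "fieldtrack") // illustrative value
	}

	func main() {
		fmt.Println(exps) // the list is already populated
	}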
+ // Rewrite MOVL/MOVQ $XXX(FP/SP) as LEAL/LEAQ.
+ if p.From.Type == obj.TYPE_ADDR && (ctxt.Arch.Thechar == '6' || p.From.Name != obj.NAME_EXTERN && p.From.Name != obj.NAME_STATIC) {
+ switch p.As {
+ case AMOVL:
+ p.As = ALEAL
+ p.From.Type = obj.TYPE_MEM
+ case AMOVQ:
+ p.As = ALEAQ
+ p.From.Type = obj.TYPE_MEM
+ }
+ }
+
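A MOV whose source is a $-address of a stack slot does not load memory; it materializes an address, which is exactly what LEA does. Rewriting it early gives later passes (including the NaCl address sanitization just below) one canonical form to handle. An illustrative before/after, with a made-up operand:

	MOVQ	$x-8(SP), AX    // source is an address, not a memory read
	// becomes
	LEAQ	x-8(SP), AX     // identical effect, canonical encoding
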
if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
nacladdr(ctxt, p, &p.From3)
nacladdr(ctxt, p, &p.From)