Cypherpunks repositories - gostls13.git/commitdiff
cmd/compile/internal/mips64: copy cmd/compile/internal/ppc64
author Shenghou Ma <minux@golang.org>
Sun, 6 Sep 2015 00:32:40 +0000 (20:32 -0400)
committer Minux Ma <minux@golang.org>
Tue, 8 Sep 2015 22:40:16 +0000 (22:40 +0000)
Just a mechanical copy, no code changes.
This is to reduce code difference when adding the mips64 port.

Change-Id: Id06e975f414a7b09f4827167b30813b228a3bfaf
Reviewed-on: https://go-review.googlesource.com/14324
Reviewed-by: Ian Lance Taylor <iant@golang.org>
src/cmd/compile/internal/mips64/cgen.go [new file with mode: 0644]
src/cmd/compile/internal/mips64/galign.go [new file with mode: 0644]
src/cmd/compile/internal/mips64/ggen.go [new file with mode: 0644]
src/cmd/compile/internal/mips64/gsubr.go [new file with mode: 0644]
src/cmd/compile/internal/mips64/opt.go [new file with mode: 0644]
src/cmd/compile/internal/mips64/peep.go [new file with mode: 0644]
src/cmd/compile/internal/mips64/prog.go [new file with mode: 0644]
src/cmd/compile/internal/mips64/reg.go [new file with mode: 0644]

diff --git a/src/cmd/compile/internal/mips64/cgen.go b/src/cmd/compile/internal/mips64/cgen.go
new file mode 100644 (file)
index 0000000..4f3092c
--- /dev/null
@@ -0,0 +1,149 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+       "cmd/compile/internal/gc"
+       "cmd/internal/obj"
+       "cmd/internal/obj/ppc64"
+)
+
+func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
+       // determine alignment.
+       // want to avoid unaligned access, so have to use
+       // smaller operations for less aligned types.
+       // for example moving [4]byte must use 4 MOVB not 1 MOVW.
+       align := int(n.Type.Align)
+
+       var op int
+       switch align {
+       default:
+               gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
+
+       case 1:
+               op = ppc64.AMOVBU
+
+       case 2:
+               op = ppc64.AMOVHU
+
+       case 4:
+               op = ppc64.AMOVWZU // there is no lwau, only lwaux
+
+       case 8:
+               op = ppc64.AMOVDU
+       }
+
+       if w%int64(align) != 0 {
+               gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
+       }
+       c := int32(w / int64(align))
+
+       // if we are copying forward on the stack and
+       // the src and dst overlap, then reverse direction
+       dir := align
+
+       if osrc < odst && odst < osrc+w {
+               dir = -dir
+       }
+
+       var dst gc.Node
+       var src gc.Node
+       if n.Ullman >= res.Ullman {
+               gc.Agenr(n, &dst, res) // temporarily use dst
+               gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
+               gins(ppc64.AMOVD, &dst, &src)
+               if res.Op == gc.ONAME {
+                       gc.Gvardef(res)
+               }
+               gc.Agen(res, &dst)
+       } else {
+               if res.Op == gc.ONAME {
+                       gc.Gvardef(res)
+               }
+               gc.Agenr(res, &dst, res)
+               gc.Agenr(n, &src, nil)
+       }
+
+       var tmp gc.Node
+       gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
+
+       // set up end marker
+       var nend gc.Node
+
+       // move src and dest to the end of block if necessary
+       if dir < 0 {
+               if c >= 4 {
+                       gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
+                       gins(ppc64.AMOVD, &src, &nend)
+               }
+
+               p := gins(ppc64.AADD, nil, &src)
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = w
+
+               p = gins(ppc64.AADD, nil, &dst)
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = w
+       } else {
+               p := gins(ppc64.AADD, nil, &src)
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = int64(-dir)
+
+               p = gins(ppc64.AADD, nil, &dst)
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = int64(-dir)
+
+               if c >= 4 {
+                       gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
+                       p := gins(ppc64.AMOVD, &src, &nend)
+                       p.From.Type = obj.TYPE_ADDR
+                       p.From.Offset = w
+               }
+       }
+
+       // move
+       // TODO: enable duffcopy for larger copies.
+       if c >= 4 {
+               p := gins(op, &src, &tmp)
+               p.From.Type = obj.TYPE_MEM
+               p.From.Offset = int64(dir)
+               ploop := p
+
+               p = gins(op, &tmp, &dst)
+               p.To.Type = obj.TYPE_MEM
+               p.To.Offset = int64(dir)
+
+               p = gins(ppc64.ACMP, &src, &nend)
+
+               gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), ploop)
+               gc.Regfree(&nend)
+       } else {
+               // TODO(austin): Instead of generating ADD $-8,R8; ADD
+               // $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
+               // generate the offsets directly and eliminate the
+               // ADDs.  That will produce shorter, more
+               // pipeline-able code.
+               var p *obj.Prog
+               for ; c > 0; c-- {
+
+                       p = gins(op, &src, &tmp)
+                       p.From.Type = obj.TYPE_MEM
+                       p.From.Offset = int64(dir)
+
+                       p = gins(op, &tmp, &dst)
+                       p.To.Type = obj.TYPE_MEM
+                       p.To.Offset = int64(dir)
+               }
+       }
+
+       gc.Regfree(&dst)
+       gc.Regfree(&src)
+       gc.Regfree(&tmp)
+}
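
The overlap test in blockcopy above is the heart of the routine: when the source lies below the destination and the two ranges overlap, a forward copy would overwrite source bytes before they are read, so the direction is reversed. Below is a minimal standalone Go sketch of the same rule, for illustration only; it is not code from this commit, and moveBytes is an invented name.

package main

import "fmt"

// moveBytes copies w bytes from buf[so:] to buf[do:], picking the
// direction the same way blockcopy does: backward only when the
// destination starts inside the source range.
func moveBytes(buf []byte, so, do, w int) {
	if so < do && do < so+w {
		for i := w - 1; i >= 0; i-- { // overlapping: copy backward
			buf[do+i] = buf[so+i]
		}
		return
	}
	for i := 0; i < w; i++ { // disjoint (or dst below src): copy forward
		buf[do+i] = buf[so+i]
	}
}

func main() {
	b := []byte("abcdef__")
	moveBytes(b, 0, 2, 6)
	fmt.Println(string(b)) // ababcdef: the backward copy preserved the source
}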
diff --git a/src/cmd/compile/internal/mips64/galign.go b/src/cmd/compile/internal/mips64/galign.go
new file mode 100644 (file)
index 0000000..16509da
--- /dev/null
@@ -0,0 +1,100 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+       "cmd/compile/internal/gc"
+       "cmd/internal/obj"
+       "cmd/internal/obj/ppc64"
+)
+
+var thechar int = '9'
+
+var thestring string = "ppc64"
+
+var thelinkarch *obj.LinkArch
+
+func linkarchinit() {
+       thestring = obj.Getgoarch()
+       gc.Thearch.Thestring = thestring
+       if thestring == "ppc64le" {
+               thelinkarch = &ppc64.Linkppc64le
+       } else {
+               thelinkarch = &ppc64.Linkppc64
+       }
+       gc.Thearch.Thelinkarch = thelinkarch
+}
+
+var MAXWIDTH int64 = 1 << 50
+
+/*
+ * Go declares several platform-specific type aliases:
+ * int, uint, and uintptr
+ */
+var typedefs = []gc.Typedef{
+       {"int", gc.TINT, gc.TINT64},
+       {"uint", gc.TUINT, gc.TUINT64},
+       {"uintptr", gc.TUINTPTR, gc.TUINT64},
+}
+
+func betypeinit() {
+       gc.Widthptr = 8
+       gc.Widthint = 8
+       gc.Widthreg = 8
+}
+
+func Main() {
+       gc.Thearch.Thechar = thechar
+       gc.Thearch.Thestring = thestring
+       gc.Thearch.Thelinkarch = thelinkarch
+       gc.Thearch.Typedefs = typedefs
+       gc.Thearch.REGSP = ppc64.REGSP
+       gc.Thearch.REGCTXT = ppc64.REGCTXT
+       gc.Thearch.REGCALLX = ppc64.REG_R3
+       gc.Thearch.REGCALLX2 = ppc64.REG_R4
+       gc.Thearch.REGRETURN = ppc64.REG_R3
+       gc.Thearch.REGMIN = ppc64.REG_R0
+       gc.Thearch.REGMAX = ppc64.REG_R31
+       gc.Thearch.FREGMIN = ppc64.REG_F0
+       gc.Thearch.FREGMAX = ppc64.REG_F31
+       gc.Thearch.MAXWIDTH = MAXWIDTH
+       gc.Thearch.ReservedRegs = resvd
+
+       gc.Thearch.Betypeinit = betypeinit
+       gc.Thearch.Cgen_hmul = cgen_hmul
+       gc.Thearch.Cgen_shift = cgen_shift
+       gc.Thearch.Clearfat = clearfat
+       gc.Thearch.Defframe = defframe
+       gc.Thearch.Dodiv = dodiv
+       gc.Thearch.Excise = excise
+       gc.Thearch.Expandchecks = expandchecks
+       gc.Thearch.Getg = getg
+       gc.Thearch.Gins = gins
+       gc.Thearch.Ginscmp = ginscmp
+       gc.Thearch.Ginscon = ginscon
+       gc.Thearch.Ginsnop = ginsnop
+       gc.Thearch.Gmove = gmove
+       gc.Thearch.Linkarchinit = linkarchinit
+       gc.Thearch.Peep = peep
+       gc.Thearch.Proginfo = proginfo
+       gc.Thearch.Regtyp = regtyp
+       gc.Thearch.Sameaddr = sameaddr
+       gc.Thearch.Smallindir = smallindir
+       gc.Thearch.Stackaddr = stackaddr
+       gc.Thearch.Blockcopy = blockcopy
+       gc.Thearch.Sudoaddable = sudoaddable
+       gc.Thearch.Sudoclean = sudoclean
+       gc.Thearch.Excludedregs = excludedregs
+       gc.Thearch.RtoB = RtoB
+       gc.Thearch.FtoB = RtoB
+       gc.Thearch.BtoR = BtoR
+       gc.Thearch.BtoF = BtoF
+       gc.Thearch.Optoas = optoas
+       gc.Thearch.Doregbits = doregbits
+       gc.Thearch.Regnames = regnames
+
+       gc.Main()
+       gc.Exit(0)
+}
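
galign.go wires the backend into the portable front end through a function table: Main fills every slot of gc.Thearch and then hands control to gc.Main, which calls back through those hooks. A toy Go sketch of the same pattern follows; all names in it (arch, genericMain, archMain) are invented for illustration and stand in, very loosely, for gc.Arch and gc.Main.

package main

import "fmt"

// arch is a toy stand-in for gc.Arch: a table of hooks that the
// portable driver calls without knowing which backend filled it.
type arch struct {
	thechar    int
	thestring  string
	betypeinit func()
	ginsnop    func()
}

var thearch arch

// genericMain is a toy stand-in for gc.Main: all architecture-specific
// behavior is reached through the table.
func genericMain() {
	thearch.betypeinit()
	fmt.Println("compiling for", thearch.thestring)
	thearch.ginsnop()
}

// archMain mirrors Main above: fill in every hook, then run the driver.
func archMain() {
	thearch = arch{
		thechar:    '9',
		thestring:  "ppc64",
		betypeinit: func() { fmt.Println("pointer/int/reg widths = 8") },
		ginsnop:    func() { fmt.Println("emit OR R0, R0") },
	}
	genericMain()
}

func main() { archMain() }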
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go
new file mode 100644 (file)
index 0000000..2779140
--- /dev/null
@@ -0,0 +1,564 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+       "cmd/compile/internal/gc"
+       "cmd/internal/obj"
+       "cmd/internal/obj/ppc64"
+       "fmt"
+)
+
+func defframe(ptxt *obj.Prog) {
+       var n *gc.Node
+
+       // fill in argument size, stack size
+       ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+       ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+       frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+       ptxt.To.Offset = int64(frame)
+
+       // insert code to zero ambiguously live variables
+       // so that the garbage collector only sees initialized values
+       // when it looks for pointers.
+       p := ptxt
+
+       hi := int64(0)
+       lo := hi
+
+       // iterate through declarations - they are sorted in decreasing xoffset order.
+       for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
+               n = l.N
+               if !n.Name.Needzero {
+                       continue
+               }
+               if n.Class != gc.PAUTO {
+                       gc.Fatalf("needzero class %d", n.Class)
+               }
+               if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+                       gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+               }
+
+               if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
+                       // merge with range we already have
+                       lo = n.Xoffset
+
+                       continue
+               }
+
+               // zero old range
+               p = zerorange(p, int64(frame), lo, hi)
+
+               // set new range
+               hi = n.Xoffset + n.Type.Width
+
+               lo = n.Xoffset
+       }
+
+       // zero final range
+       zerorange(p, int64(frame), lo, hi)
+}
+
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
+       cnt := hi - lo
+       if cnt == 0 {
+               return p
+       }
+       if cnt < int64(4*gc.Widthptr) {
+               for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
+                       p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
+               }
+               // TODO(dfc): https://golang.org/issue/12108
+               // If DUFFZERO is used inside a tail call (see genwrapper) it will
+               // overwrite the link register.
+       } else if false && cnt <= int64(128*gc.Widthptr) {
+               p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+               p.Reg = ppc64.REGSP
+               p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+               f := gc.Sysfunc("duffzero")
+               gc.Naddr(&p.To, f)
+               gc.Afunclit(&p.To, f)
+               p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+       } else {
+               p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+               p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
+               p.Reg = ppc64.REGSP
+               p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
+               p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+               p.Reg = ppc64.REGRT1
+               p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
+               p1 := p
+               p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+               p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+               gc.Patch(p, p1)
+       }
+
+       return p
+}
+
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+       q := gc.Ctxt.NewProg()
+       gc.Clearp(q)
+       q.As = int16(as)
+       q.Lineno = p.Lineno
+       q.From.Type = int16(ftype)
+       q.From.Reg = int16(freg)
+       q.From.Offset = foffset
+       q.To.Type = int16(ttype)
+       q.To.Reg = int16(treg)
+       q.To.Offset = toffset
+       q.Link = p.Link
+       p.Link = q
+       return q
+}
+
+func ginsnop() {
+       var reg gc.Node
+       gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
+       gins(ppc64.AOR, &reg, &reg)
+}
+
+var panicdiv *gc.Node
+
+/*
+ * generate division.
+ * generates one of:
+ *     res = nl / nr
+ *     res = nl % nr
+ * according to op.
+ */
+func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+       // Have to be careful about handling
+       // the most negative int divided by -1 correctly.
+       // The hardware will generate an undefined result.
+       // Also need to explicitly trap on division by zero;
+       // the hardware will silently generate an undefined result.
+       // DIVW will leave an unpredictable result in the upper 32 bits,
+       // so always use DIVD/DIVDU.
+       t := nl.Type
+
+       t0 := t
+       check := 0
+       if gc.Issigned[t.Etype] {
+               check = 1
+               if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
+                       check = 0
+               } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
+                       check = 0
+               }
+       }
+
+       if t.Width < 8 {
+               if gc.Issigned[t.Etype] {
+                       t = gc.Types[gc.TINT64]
+               } else {
+                       t = gc.Types[gc.TUINT64]
+               }
+               check = 0
+       }
+
+       a := optoas(gc.ODIV, t)
+
+       var tl gc.Node
+       gc.Regalloc(&tl, t0, nil)
+       var tr gc.Node
+       gc.Regalloc(&tr, t0, nil)
+       if nl.Ullman >= nr.Ullman {
+               gc.Cgen(nl, &tl)
+               gc.Cgen(nr, &tr)
+       } else {
+               gc.Cgen(nr, &tr)
+               gc.Cgen(nl, &tl)
+       }
+
+       if t != t0 {
+               // Convert
+               tl2 := tl
+
+               tr2 := tr
+               tl.Type = t
+               tr.Type = t
+               gmove(&tl2, &tl)
+               gmove(&tr2, &tr)
+       }
+
+       // Handle divide-by-zero panic.
+       p1 := gins(optoas(gc.OCMP, t), &tr, nil)
+
+       p1.To.Type = obj.TYPE_REG
+       p1.To.Reg = ppc64.REGZERO
+       p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+       if panicdiv == nil {
+               panicdiv = gc.Sysfunc("panicdivide")
+       }
+       gc.Ginscall(panicdiv, -1)
+       gc.Patch(p1, gc.Pc)
+
+       var p2 *obj.Prog
+       if check != 0 {
+               var nm1 gc.Node
+               gc.Nodconst(&nm1, t, -1)
+               gins(optoas(gc.OCMP, t), &tr, &nm1)
+               p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+               if op == gc.ODIV {
+                       // a / (-1) is -a.
+                       gins(optoas(gc.OMINUS, t), nil, &tl)
+
+                       gmove(&tl, res)
+               } else {
+                       // a % (-1) is 0.
+                       var nz gc.Node
+                       gc.Nodconst(&nz, t, 0)
+
+                       gmove(&nz, res)
+               }
+
+               p2 = gc.Gbranch(obj.AJMP, nil, 0)
+               gc.Patch(p1, gc.Pc)
+       }
+
+       p1 = gins(a, &tr, &tl)
+       if op == gc.ODIV {
+               gc.Regfree(&tr)
+               gmove(&tl, res)
+       } else {
+               // A%B = A-(A/B*B)
+               var tm gc.Node
+               gc.Regalloc(&tm, t, nil)
+
+               // patch div to use the 3 register form
+               // TODO(minux): add gins3?
+               p1.Reg = p1.To.Reg
+
+               p1.To.Reg = tm.Reg
+               gins(optoas(gc.OMUL, t), &tr, &tm)
+               gc.Regfree(&tr)
+               gins(optoas(gc.OSUB, t), &tm, &tl)
+               gc.Regfree(&tm)
+               gmove(&tl, res)
+       }
+
+       gc.Regfree(&tl)
+       if check != 0 {
+               gc.Patch(p2, gc.Pc)
+       }
+}
+
+/*
+ * generate high multiply:
+ *   res = (nl*nr) >> width
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+       // largest ullman on left.
+       if nl.Ullman < nr.Ullman {
+               nl, nr = nr, nl
+       }
+
+       t := nl.Type
+       w := int(t.Width * 8)
+       var n1 gc.Node
+       gc.Cgenr(nl, &n1, res)
+       var n2 gc.Node
+       gc.Cgenr(nr, &n2, nil)
+       switch gc.Simtype[t.Etype] {
+       case gc.TINT8,
+               gc.TINT16,
+               gc.TINT32:
+               gins(optoas(gc.OMUL, t), &n2, &n1)
+               p := (*obj.Prog)(gins(ppc64.ASRAD, nil, &n1))
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = int64(w)
+
+       case gc.TUINT8,
+               gc.TUINT16,
+               gc.TUINT32:
+               gins(optoas(gc.OMUL, t), &n2, &n1)
+               p := (*obj.Prog)(gins(ppc64.ASRD, nil, &n1))
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = int64(w)
+
+       case gc.TINT64,
+               gc.TUINT64:
+               if gc.Issigned[t.Etype] {
+                       gins(ppc64.AMULHD, &n2, &n1)
+               } else {
+                       gins(ppc64.AMULHDU, &n2, &n1)
+               }
+
+       default:
+               gc.Fatalf("cgen_hmul %v", t)
+       }
+
+       gc.Cgen(&n1, res)
+       gc.Regfree(&n1)
+       gc.Regfree(&n2)
+}
+
+/*
+ * generate shift according to op, one of:
+ *     res = nl << nr
+ *     res = nl >> nr
+ */
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+       a := int(optoas(op, nl.Type))
+
+       if nr.Op == gc.OLITERAL {
+               var n1 gc.Node
+               gc.Regalloc(&n1, nl.Type, res)
+               gc.Cgen(nl, &n1)
+               sc := uint64(nr.Int())
+               if sc >= uint64(nl.Type.Width*8) {
+                       // large shift gets 2 shifts by width-1
+                       var n3 gc.Node
+                       gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+
+                       gins(a, &n3, &n1)
+                       gins(a, &n3, &n1)
+               } else {
+                       gins(a, nr, &n1)
+               }
+               gmove(&n1, res)
+               gc.Regfree(&n1)
+               return
+       }
+
+       if nl.Ullman >= gc.UINF {
+               var n4 gc.Node
+               gc.Tempname(&n4, nl.Type)
+               gc.Cgen(nl, &n4)
+               nl = &n4
+       }
+
+       if nr.Ullman >= gc.UINF {
+               var n5 gc.Node
+               gc.Tempname(&n5, nr.Type)
+               gc.Cgen(nr, &n5)
+               nr = &n5
+       }
+
+       // Allow either uint32 or uint64 as shift type,
+       // to avoid unnecessary conversion from uint32 to uint64
+       // just to do the comparison.
+       tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
+
+       if tcount.Etype < gc.TUINT32 {
+               tcount = gc.Types[gc.TUINT32]
+       }
+
+       var n1 gc.Node
+       gc.Regalloc(&n1, nr.Type, nil) // to hold the shift count
+       var n3 gc.Node
+       gc.Regalloc(&n3, tcount, &n1) // to clear the high bits of the shift count
+
+       var n2 gc.Node
+       gc.Regalloc(&n2, nl.Type, res)
+
+       if nl.Ullman >= nr.Ullman {
+               gc.Cgen(nl, &n2)
+               gc.Cgen(nr, &n1)
+               gmove(&n1, &n3)
+       } else {
+               gc.Cgen(nr, &n1)
+               gmove(&n1, &n3)
+               gc.Cgen(nl, &n2)
+       }
+
+       gc.Regfree(&n3)
+
+       // test and fix up large shifts
+       if !bounded {
+               gc.Nodconst(&n3, tcount, nl.Type.Width*8)
+               gins(optoas(gc.OCMP, tcount), &n1, &n3)
+               p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
+               if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+                       gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+                       gins(a, &n3, &n2)
+               } else {
+                       gc.Nodconst(&n3, nl.Type, 0)
+                       gmove(&n3, &n2)
+               }
+
+               gc.Patch(p1, gc.Pc)
+       }
+
+       gins(a, &n1, &n2)
+
+       gmove(&n2, res)
+
+       gc.Regfree(&n1)
+       gc.Regfree(&n2)
+}
+
+func clearfat(nl *gc.Node) {
+       /* clear a fat object */
+       if gc.Debug['g'] != 0 {
+               fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
+       }
+
+       w := uint64(nl.Type.Width)
+
+       // Avoid taking the address for simple enough types.
+       if gc.Componentgen(nil, nl) {
+               return
+       }
+
+       c := uint64(w % 8) // bytes
+       q := uint64(w / 8) // dwords
+
+       if gc.Reginuse(ppc64.REGRT1) {
+               gc.Fatalf("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
+       }
+
+       var r0 gc.Node
+       gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
+       var dst gc.Node
+       gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
+       gc.Regrealloc(&dst)
+       gc.Agen(nl, &dst)
+
+       var boff uint64
+       if q > 128 {
+               p := gins(ppc64.ASUB, nil, &dst)
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = 8
+
+               var end gc.Node
+               gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
+               p = gins(ppc64.AMOVD, &dst, &end)
+               p.From.Type = obj.TYPE_ADDR
+               p.From.Offset = int64(q * 8)
+
+               p = gins(ppc64.AMOVDU, &r0, &dst)
+               p.To.Type = obj.TYPE_MEM
+               p.To.Offset = 8
+               pl := (*obj.Prog)(p)
+
+               p = gins(ppc64.ACMP, &dst, &end)
+               gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
+
+               gc.Regfree(&end)
+
+               // The loop leaves R3 on the last zeroed dword
+               boff = 8
+               // TODO(dfc): https://golang.org/issue/12108
+               // If DUFFZERO is used inside a tail call (see genwrapper) it will
+               // overwrite the link register.
+       } else if false && q >= 4 {
+               p := gins(ppc64.ASUB, nil, &dst)
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = 8
+               f := (*gc.Node)(gc.Sysfunc("duffzero"))
+               p = gins(obj.ADUFFZERO, nil, f)
+               gc.Afunclit(&p.To, f)
+
+               // 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
+               p.To.Offset = int64(4 * (128 - q))
+
+               // duffzero leaves R3 on the last zeroed dword
+               boff = 8
+       } else {
+               var p *obj.Prog
+               for t := uint64(0); t < q; t++ {
+                       p = gins(ppc64.AMOVD, &r0, &dst)
+                       p.To.Type = obj.TYPE_MEM
+                       p.To.Offset = int64(8 * t)
+               }
+
+               boff = 8 * q
+       }
+
+       var p *obj.Prog
+       for t := uint64(0); t < c; t++ {
+               p = gins(ppc64.AMOVB, &r0, &dst)
+               p.To.Type = obj.TYPE_MEM
+               p.To.Offset = int64(t + boff)
+       }
+
+       gc.Regfree(&dst)
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+       var p1 *obj.Prog
+       var p2 *obj.Prog
+
+       for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
+               if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
+                       fmt.Printf("expandchecks: %v\n", p)
+               }
+               if p.As != obj.ACHECKNIL {
+                       continue
+               }
+               if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
+                       gc.Warnl(int(p.Lineno), "generated nil check")
+               }
+               if p.From.Type != obj.TYPE_REG {
+                       gc.Fatalf("invalid nil check %v\n", p)
+               }
+
+               /*
+                       // check is
+                       //      TD $4, R0, arg (R0 is always zero)
+                       // eqv. to:
+                       //      tdeq r0, arg
+                       // NOTE: this needs special runtime support to make SIGTRAP recoverable.
+                       reg = p->from.reg;
+                       p->as = ATD;
+                       p->from = p->to = p->from3 = zprog.from;
+                       p->from.type = TYPE_CONST;
+                       p->from.offset = 4;
+                       p->from.reg = 0;
+                       p->reg = REGZERO;
+                       p->to.type = TYPE_REG;
+                       p->to.reg = reg;
+               */
+               // check is
+               //      CMP arg, R0
+               //      BNE 2(PC) [likely]
+               //      MOVD R0, 0(R0)
+               p1 = gc.Ctxt.NewProg()
+
+               p2 = gc.Ctxt.NewProg()
+               gc.Clearp(p1)
+               gc.Clearp(p2)
+               p1.Link = p2
+               p2.Link = p.Link
+               p.Link = p1
+               p1.Lineno = p.Lineno
+               p2.Lineno = p.Lineno
+               p1.Pc = 9999
+               p2.Pc = 9999
+               p.As = ppc64.ACMP
+               p.To.Type = obj.TYPE_REG
+               p.To.Reg = ppc64.REGZERO
+               p1.As = ppc64.ABNE
+
+               //p1->from.type = TYPE_CONST;
+               //p1->from.offset = 1; // likely
+               p1.To.Type = obj.TYPE_BRANCH
+
+               p1.To.Val = p2.Link
+
+               // crash by write to memory address 0.
+               p2.As = ppc64.AMOVD
+
+               p2.From.Type = obj.TYPE_REG
+               p2.From.Reg = ppc64.REGZERO
+               p2.To.Type = obj.TYPE_MEM
+               p2.To.Reg = ppc64.REGZERO
+               p2.To.Offset = 0
+       }
+}
+
+// res = runtime.getg()
+func getg(res *gc.Node) {
+       var n1 gc.Node
+       gc.Nodreg(&n1, res.Type, ppc64.REGG)
+       gmove(&n1, res)
+}
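
dodiv above guards against two hardware traps: the most negative integer divided by -1, and division by zero, and its OMOD path derives the remainder from the identity A%B = A-(A/B*B). The standalone Go snippet below checks those same identities with ordinary Go arithmetic (Go defines both corner cases at the language level); it is an illustration, not code from this commit.

package main

import (
	"fmt"
	"math"
)

func main() {
	// Most negative int64 divided by -1: the quotient wraps back to
	// the dividend itself, exactly the "a / (-1) is -a" case dodiv
	// special-cases (negating MinInt64 overflows back to MinInt64).
	a := int64(math.MinInt64)
	fmt.Println(a/-1 == a) // true
	// a % (-1) is 0, the other special case.
	fmt.Println(a%-1 == 0) // true

	// The OMOD path computes the remainder with DIV, MUL, SUB,
	// i.e. A%B = A - (A/B)*B; ordinary Go arithmetic agrees.
	x, y := int64(-7), int64(3)
	fmt.Println(x%y == x-(x/y)*y) // true
}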
diff --git a/src/cmd/compile/internal/mips64/gsubr.go b/src/cmd/compile/internal/mips64/gsubr.go
new file mode 100644 (file)
index 0000000..4ef928c
--- /dev/null
@@ -0,0 +1,1031 @@
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+//     Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//     Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//     Portions Copyright © 1997-1999 Vita Nuova Limited
+//     Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//     Portions Copyright © 2004,2006 Bruce Ellis
+//     Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//     Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//     Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+       "cmd/compile/internal/big"
+       "cmd/compile/internal/gc"
+       "cmd/internal/obj"
+       "cmd/internal/obj/ppc64"
+       "fmt"
+)
+
+var resvd = []int{
+       ppc64.REGZERO,
+       ppc64.REGSP, // reserved for SP
+       // We need to preserve the C ABI TLS pointer because sigtramp
+       // may run during C code and needs to access the g.  C
+       // clobbers REGG, so if Go were to clobber REGTLS, sigtramp
+       // wouldn't know which convention to use.  By preserving REGTLS,
+       // we can just retrieve g from TLS when we aren't sure.
+       ppc64.REGTLS,
+
+       // TODO(austin): Consolidate REGTLS and REGG?
+       ppc64.REGG,
+       ppc64.REGTMP, // REGTMP
+       ppc64.FREGCVI,
+       ppc64.FREGZERO,
+       ppc64.FREGHALF,
+       ppc64.FREGONE,
+       ppc64.FREGTWO,
+}
+
+/*
+ * generate
+ *     as $c, n
+ */
+func ginscon(as int, c int64, n2 *gc.Node) {
+       var n1 gc.Node
+
+       gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+       if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) || n2.Op != gc.OREGISTER || as == ppc64.AMULLD {
+               // cannot have more than a 16-bit immediate in ADD, etc.;
+               // instead, MOV into a register first.
+               var ntmp gc.Node
+               gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+               rawgins(ppc64.AMOVD, &n1, &ntmp)
+               rawgins(as, &ntmp, n2)
+               gc.Regfree(&ntmp)
+               return
+       }
+
+       rawgins(as, &n1, n2)
+}
+
+/*
+ * generate
+ *     as n, $c (CMP/CMPU)
+ */
+func ginscon2(as int, n2 *gc.Node, c int64) {
+       var n1 gc.Node
+
+       gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+       switch as {
+       default:
+               gc.Fatalf("ginscon2")
+
+       case ppc64.ACMP:
+               if -ppc64.BIG <= c && c <= ppc64.BIG {
+                       rawgins(as, n2, &n1)
+                       return
+               }
+
+       case ppc64.ACMPU:
+               if 0 <= c && c <= 2*ppc64.BIG {
+                       rawgins(as, n2, &n1)
+                       return
+               }
+       }
+
+       // MOV n1 into register first
+       var ntmp gc.Node
+       gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+       rawgins(ppc64.AMOVD, &n1, &ntmp)
+       rawgins(as, n2, &ntmp)
+       gc.Regfree(&ntmp)
+}
+
+func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+       if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
+               // Reverse comparison to place constant last.
+               op = gc.Brrev(op)
+               n1, n2 = n2, n1
+       }
+
+       var r1, r2, g1, g2 gc.Node
+       gc.Regalloc(&r1, t, n1)
+       gc.Regalloc(&g1, n1.Type, &r1)
+       gc.Cgen(n1, &g1)
+       gmove(&g1, &r1)
+       if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
+               ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
+       } else {
+               gc.Regalloc(&r2, t, n2)
+               gc.Regalloc(&g2, n1.Type, &r2)
+               gc.Cgen(n2, &g2)
+               gmove(&g2, &r2)
+               rawgins(optoas(gc.OCMP, t), &r1, &r2)
+               gc.Regfree(&g2)
+               gc.Regfree(&r2)
+       }
+       gc.Regfree(&g1)
+       gc.Regfree(&r1)
+       return gc.Gbranch(optoas(op, t), nil, likely)
+}
+
+// set up nodes representing 2^63
+var (
+       bigi         gc.Node
+       bigf         gc.Node
+       bignodes_did bool
+)
+
+func bignodes() {
+       if bignodes_did {
+               return
+       }
+       bignodes_did = true
+
+       var i big.Int
+       i.SetInt64(1)
+       i.Lsh(&i, 63)
+
+       gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
+       bigi.SetBigInt(&i)
+
+       bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
+}
+
+/*
+ * generate move:
+ *     t = f
+ * hard part is conversions.
+ */
+func gmove(f *gc.Node, t *gc.Node) {
+       if gc.Debug['M'] != 0 {
+               fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+       }
+
+       ft := int(gc.Simsimtype(f.Type))
+       tt := int(gc.Simsimtype(t.Type))
+       cvt := (*gc.Type)(t.Type)
+
+       if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
+               gc.Complexmove(f, t)
+               return
+       }
+
+       // cannot have two memory operands
+       var r2 gc.Node
+       var r1 gc.Node
+       var a int
+       if gc.Ismem(f) && gc.Ismem(t) {
+               goto hard
+       }
+
+       // convert constant to desired type
+       if f.Op == gc.OLITERAL {
+               var con gc.Node
+               switch tt {
+               default:
+                       f.Convconst(&con, t.Type)
+
+               case gc.TINT32,
+                       gc.TINT16,
+                       gc.TINT8:
+                       var con gc.Node
+                       f.Convconst(&con, gc.Types[gc.TINT64])
+                       var r1 gc.Node
+                       gc.Regalloc(&r1, con.Type, t)
+                       gins(ppc64.AMOVD, &con, &r1)
+                       gmove(&r1, t)
+                       gc.Regfree(&r1)
+                       return
+
+               case gc.TUINT32,
+                       gc.TUINT16,
+                       gc.TUINT8:
+                       var con gc.Node
+                       f.Convconst(&con, gc.Types[gc.TUINT64])
+                       var r1 gc.Node
+                       gc.Regalloc(&r1, con.Type, t)
+                       gins(ppc64.AMOVD, &con, &r1)
+                       gmove(&r1, t)
+                       gc.Regfree(&r1)
+                       return
+               }
+
+               f = &con
+               ft = tt // so big switch will choose a simple mov
+
+               // constants can't move directly to memory.
+               if gc.Ismem(t) {
+                       goto hard
+               }
+       }
+
+       // float constants come from memory.
+       //if(isfloat[tt])
+       //      goto hard;
+
+       // 64-bit immediates are also from memory.
+       //if(isint[tt])
+       //      goto hard;
+       //// 64-bit immediates are really 32-bit sign-extended
+       //// unless moving into a register.
+       //if(isint[tt]) {
+       //      if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
+       //              goto hard;
+       //      if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
+       //              goto hard;
+       //}
+
+       // value -> value copy, only one memory operand.
+       // figure out the instruction to use.
+       // break out of switch for one-instruction gins.
+       // goto rdst for "destination must be register".
+       // goto hard for "convert to cvt type first".
+       // otherwise handle and return.
+
+       switch uint32(ft)<<16 | uint32(tt) {
+       default:
+               gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+
+               /*
+                * integer copy and truncate
+                */
+       case gc.TINT8<<16 | gc.TINT8, // same size
+               gc.TUINT8<<16 | gc.TINT8,
+               gc.TINT16<<16 | gc.TINT8,
+               // truncate
+               gc.TUINT16<<16 | gc.TINT8,
+               gc.TINT32<<16 | gc.TINT8,
+               gc.TUINT32<<16 | gc.TINT8,
+               gc.TINT64<<16 | gc.TINT8,
+               gc.TUINT64<<16 | gc.TINT8:
+               a = ppc64.AMOVB
+
+       case gc.TINT8<<16 | gc.TUINT8, // same size
+               gc.TUINT8<<16 | gc.TUINT8,
+               gc.TINT16<<16 | gc.TUINT8,
+               // truncate
+               gc.TUINT16<<16 | gc.TUINT8,
+               gc.TINT32<<16 | gc.TUINT8,
+               gc.TUINT32<<16 | gc.TUINT8,
+               gc.TINT64<<16 | gc.TUINT8,
+               gc.TUINT64<<16 | gc.TUINT8:
+               a = ppc64.AMOVBZ
+
+       case gc.TINT16<<16 | gc.TINT16, // same size
+               gc.TUINT16<<16 | gc.TINT16,
+               gc.TINT32<<16 | gc.TINT16,
+               // truncate
+               gc.TUINT32<<16 | gc.TINT16,
+               gc.TINT64<<16 | gc.TINT16,
+               gc.TUINT64<<16 | gc.TINT16:
+               a = ppc64.AMOVH
+
+       case gc.TINT16<<16 | gc.TUINT16, // same size
+               gc.TUINT16<<16 | gc.TUINT16,
+               gc.TINT32<<16 | gc.TUINT16,
+               // truncate
+               gc.TUINT32<<16 | gc.TUINT16,
+               gc.TINT64<<16 | gc.TUINT16,
+               gc.TUINT64<<16 | gc.TUINT16:
+               a = ppc64.AMOVHZ
+
+       case gc.TINT32<<16 | gc.TINT32, // same size
+               gc.TUINT32<<16 | gc.TINT32,
+               gc.TINT64<<16 | gc.TINT32,
+               // truncate
+               gc.TUINT64<<16 | gc.TINT32:
+               a = ppc64.AMOVW
+
+       case gc.TINT32<<16 | gc.TUINT32, // same size
+               gc.TUINT32<<16 | gc.TUINT32,
+               gc.TINT64<<16 | gc.TUINT32,
+               gc.TUINT64<<16 | gc.TUINT32:
+               a = ppc64.AMOVWZ
+
+       case gc.TINT64<<16 | gc.TINT64, // same size
+               gc.TINT64<<16 | gc.TUINT64,
+               gc.TUINT64<<16 | gc.TINT64,
+               gc.TUINT64<<16 | gc.TUINT64:
+               a = ppc64.AMOVD
+
+               /*
+                * integer up-conversions
+                */
+       case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+               gc.TINT8<<16 | gc.TUINT16,
+               gc.TINT8<<16 | gc.TINT32,
+               gc.TINT8<<16 | gc.TUINT32,
+               gc.TINT8<<16 | gc.TINT64,
+               gc.TINT8<<16 | gc.TUINT64:
+               a = ppc64.AMOVB
+
+               goto rdst
+
+       case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+               gc.TUINT8<<16 | gc.TUINT16,
+               gc.TUINT8<<16 | gc.TINT32,
+               gc.TUINT8<<16 | gc.TUINT32,
+               gc.TUINT8<<16 | gc.TINT64,
+               gc.TUINT8<<16 | gc.TUINT64:
+               a = ppc64.AMOVBZ
+
+               goto rdst
+
+       case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+               gc.TINT16<<16 | gc.TUINT32,
+               gc.TINT16<<16 | gc.TINT64,
+               gc.TINT16<<16 | gc.TUINT64:
+               a = ppc64.AMOVH
+
+               goto rdst
+
+       case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+               gc.TUINT16<<16 | gc.TUINT32,
+               gc.TUINT16<<16 | gc.TINT64,
+               gc.TUINT16<<16 | gc.TUINT64:
+               a = ppc64.AMOVHZ
+
+               goto rdst
+
+       case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+               gc.TINT32<<16 | gc.TUINT64:
+               a = ppc64.AMOVW
+
+               goto rdst
+
+       case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+               gc.TUINT32<<16 | gc.TUINT64:
+               a = ppc64.AMOVWZ
+
+               goto rdst
+
+               //warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
+       //return;
+       // algorithm is:
+       //      if small enough, use native float64 -> int64 conversion.
+       //      otherwise, subtract 2^63, convert, and add it back.
+       /*
+        * float to integer
+        */
+       case gc.TFLOAT32<<16 | gc.TINT32,
+               gc.TFLOAT64<<16 | gc.TINT32,
+               gc.TFLOAT32<<16 | gc.TINT64,
+               gc.TFLOAT64<<16 | gc.TINT64,
+               gc.TFLOAT32<<16 | gc.TINT16,
+               gc.TFLOAT32<<16 | gc.TINT8,
+               gc.TFLOAT32<<16 | gc.TUINT16,
+               gc.TFLOAT32<<16 | gc.TUINT8,
+               gc.TFLOAT64<<16 | gc.TINT16,
+               gc.TFLOAT64<<16 | gc.TINT8,
+               gc.TFLOAT64<<16 | gc.TUINT16,
+               gc.TFLOAT64<<16 | gc.TUINT8,
+               gc.TFLOAT32<<16 | gc.TUINT32,
+               gc.TFLOAT64<<16 | gc.TUINT32,
+               gc.TFLOAT32<<16 | gc.TUINT64,
+               gc.TFLOAT64<<16 | gc.TUINT64:
+               bignodes()
+
+               var r1 gc.Node
+               gc.Regalloc(&r1, gc.Types[ft], f)
+               gmove(f, &r1)
+               if tt == gc.TUINT64 {
+                       gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+                       gmove(&bigf, &r2)
+                       gins(ppc64.AFCMPU, &r1, &r2)
+                       p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1))
+                       gins(ppc64.AFSUB, &r2, &r1)
+                       gc.Patch(p1, gc.Pc)
+                       gc.Regfree(&r2)
+               }
+
+               gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+               var r3 gc.Node
+               gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
+               gins(ppc64.AFCTIDZ, &r1, &r2)
+               p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil))
+               p1.To.Type = obj.TYPE_MEM
+               p1.To.Reg = ppc64.REGSP
+               p1.To.Offset = -8
+               p1 = gins(ppc64.AMOVD, nil, &r3)
+               p1.From.Type = obj.TYPE_MEM
+               p1.From.Reg = ppc64.REGSP
+               p1.From.Offset = -8
+               gc.Regfree(&r2)
+               gc.Regfree(&r1)
+               if tt == gc.TUINT64 {
+                       p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
+                       gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
+                       gins(ppc64.AMOVD, &bigi, &r1)
+                       gins(ppc64.AADD, &r1, &r3)
+                       gc.Patch(p1, gc.Pc)
+               }
+
+               gmove(&r3, t)
+               gc.Regfree(&r3)
+               return
+
+               //warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
+       //return;
+       // algorithm is:
+       //      if small enough, use native int64 -> float64 conversion.
+       //      otherwise, halve (rounding to odd?), convert, and double.
+       /*
+        * integer to float
+        */
+       case gc.TINT32<<16 | gc.TFLOAT32,
+               gc.TINT32<<16 | gc.TFLOAT64,
+               gc.TINT64<<16 | gc.TFLOAT32,
+               gc.TINT64<<16 | gc.TFLOAT64,
+               gc.TINT16<<16 | gc.TFLOAT32,
+               gc.TINT16<<16 | gc.TFLOAT64,
+               gc.TINT8<<16 | gc.TFLOAT32,
+               gc.TINT8<<16 | gc.TFLOAT64,
+               gc.TUINT16<<16 | gc.TFLOAT32,
+               gc.TUINT16<<16 | gc.TFLOAT64,
+               gc.TUINT8<<16 | gc.TFLOAT32,
+               gc.TUINT8<<16 | gc.TFLOAT64,
+               gc.TUINT32<<16 | gc.TFLOAT32,
+               gc.TUINT32<<16 | gc.TFLOAT64,
+               gc.TUINT64<<16 | gc.TFLOAT32,
+               gc.TUINT64<<16 | gc.TFLOAT64:
+               bignodes()
+
+               var r1 gc.Node
+               gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
+               gmove(f, &r1)
+               if ft == gc.TUINT64 {
+                       gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
+                       gmove(&bigi, &r2)
+                       gins(ppc64.ACMPU, &r1, &r2)
+                       p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1))
+                       p2 := (*obj.Prog)(gins(ppc64.ASRD, nil, &r1))
+                       p2.From.Type = obj.TYPE_CONST
+                       p2.From.Offset = 1
+                       gc.Patch(p1, gc.Pc)
+               }
+
+               gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
+               p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil))
+               p1.To.Type = obj.TYPE_MEM
+               p1.To.Reg = ppc64.REGSP
+               p1.To.Offset = -8
+               p1 = gins(ppc64.AFMOVD, nil, &r2)
+               p1.From.Type = obj.TYPE_MEM
+               p1.From.Reg = ppc64.REGSP
+               p1.From.Offset = -8
+               gins(ppc64.AFCFID, &r2, &r2)
+               gc.Regfree(&r1)
+               if ft == gc.TUINT64 {
+                       p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again
+                       gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
+                       gins(ppc64.AFMUL, &r1, &r2)
+                       gc.Patch(p1, gc.Pc)
+               }
+
+               gmove(&r2, t)
+               gc.Regfree(&r2)
+               return
+
+               /*
+                * float to float
+                */
+       case gc.TFLOAT32<<16 | gc.TFLOAT32:
+               a = ppc64.AFMOVS
+
+       case gc.TFLOAT64<<16 | gc.TFLOAT64:
+               a = ppc64.AFMOVD
+
+       case gc.TFLOAT32<<16 | gc.TFLOAT64:
+               a = ppc64.AFMOVS
+               goto rdst
+
+       case gc.TFLOAT64<<16 | gc.TFLOAT32:
+               a = ppc64.AFRSP
+               goto rdst
+       }
+
+       gins(a, f, t)
+       return
+
+       // requires register destination
+rdst:
+       {
+               gc.Regalloc(&r1, t.Type, t)
+
+               gins(a, f, &r1)
+               gmove(&r1, t)
+               gc.Regfree(&r1)
+               return
+       }
+
+       // requires register intermediate
+hard:
+       gc.Regalloc(&r1, cvt, t)
+
+       gmove(f, &r1)
+       gmove(&r1, t)
+       gc.Regfree(&r1)
+       return
+}
+
+func intLiteral(n *gc.Node) (x int64, ok bool) {
+       switch {
+       case n == nil:
+               return
+       case gc.Isconst(n, gc.CTINT):
+               return n.Int(), true
+       case gc.Isconst(n, gc.CTBOOL):
+               return int64(obj.Bool2int(n.Bool())), true
+       }
+       return
+}
+
+// gins is called by the front end.
+// It synthesizes some multiple-instruction sequences
+// so the front end can stay simpler.
+func gins(as int, f, t *gc.Node) *obj.Prog {
+       if as >= obj.A_ARCHSPECIFIC {
+               if x, ok := intLiteral(f); ok {
+                       ginscon(as, x, t)
+                       return nil // caller must not use
+               }
+       }
+       if as == ppc64.ACMP || as == ppc64.ACMPU {
+               if x, ok := intLiteral(t); ok {
+                       ginscon2(as, f, x)
+                       return nil // caller must not use
+               }
+       }
+       return rawgins(as, f, t)
+}
+
+/*
+ * generate one instruction:
+ *     as f, t
+ */
+func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+       // TODO(austin): Add self-move test like in 6g (but be careful
+       // of truncation moves)
+
+       p := gc.Prog(as)
+       gc.Naddr(&p.From, f)
+       gc.Naddr(&p.To, t)
+
+       switch as {
+       case obj.ACALL:
+               if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
+                       // Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
+                       pp := gc.Prog(as)
+                       pp.From = p.From
+                       pp.To.Type = obj.TYPE_REG
+                       pp.To.Reg = ppc64.REG_CTR
+
+                       p.As = ppc64.AMOVD
+                       p.From = p.To
+                       p.To.Type = obj.TYPE_REG
+                       p.To.Reg = ppc64.REG_CTR
+
+                       if gc.Debug['g'] != 0 {
+                               fmt.Printf("%v\n", p)
+                               fmt.Printf("%v\n", pp)
+                       }
+
+                       return pp
+               }
+
+       // Bad things the front end has done to us. Crash to find call stack.
+       case ppc64.AAND, ppc64.AMULLD:
+               if p.From.Type == obj.TYPE_CONST {
+                       gc.Debug['h'] = 1
+                       gc.Fatalf("bad inst: %v", p)
+               }
+       case ppc64.ACMP, ppc64.ACMPU:
+               if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
+                       gc.Debug['h'] = 1
+                       gc.Fatalf("bad inst: %v", p)
+               }
+       }
+
+       if gc.Debug['g'] != 0 {
+               fmt.Printf("%v\n", p)
+       }
+
+       w := int32(0)
+       switch as {
+       case ppc64.AMOVB,
+               ppc64.AMOVBU,
+               ppc64.AMOVBZ,
+               ppc64.AMOVBZU:
+               w = 1
+
+       case ppc64.AMOVH,
+               ppc64.AMOVHU,
+               ppc64.AMOVHZ,
+               ppc64.AMOVHZU:
+               w = 2
+
+       case ppc64.AMOVW,
+               ppc64.AMOVWU,
+               ppc64.AMOVWZ,
+               ppc64.AMOVWZU:
+               w = 4
+
+       case ppc64.AMOVD,
+               ppc64.AMOVDU:
+               if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
+                       break
+               }
+               w = 8
+       }
+
+       if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
+               gc.Dump("f", f)
+               gc.Dump("t", t)
+               gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
+       }
+
+       return p
+}
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
+func optoas(op int, t *gc.Type) int {
+       if t == nil {
+               gc.Fatalf("optoas: t is nil")
+       }
+
+       a := int(obj.AXXX)
+       switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+       default:
+               gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+
+       case gc.OEQ<<16 | gc.TBOOL,
+               gc.OEQ<<16 | gc.TINT8,
+               gc.OEQ<<16 | gc.TUINT8,
+               gc.OEQ<<16 | gc.TINT16,
+               gc.OEQ<<16 | gc.TUINT16,
+               gc.OEQ<<16 | gc.TINT32,
+               gc.OEQ<<16 | gc.TUINT32,
+               gc.OEQ<<16 | gc.TINT64,
+               gc.OEQ<<16 | gc.TUINT64,
+               gc.OEQ<<16 | gc.TPTR32,
+               gc.OEQ<<16 | gc.TPTR64,
+               gc.OEQ<<16 | gc.TFLOAT32,
+               gc.OEQ<<16 | gc.TFLOAT64:
+               a = ppc64.ABEQ
+
+       case gc.ONE<<16 | gc.TBOOL,
+               gc.ONE<<16 | gc.TINT8,
+               gc.ONE<<16 | gc.TUINT8,
+               gc.ONE<<16 | gc.TINT16,
+               gc.ONE<<16 | gc.TUINT16,
+               gc.ONE<<16 | gc.TINT32,
+               gc.ONE<<16 | gc.TUINT32,
+               gc.ONE<<16 | gc.TINT64,
+               gc.ONE<<16 | gc.TUINT64,
+               gc.ONE<<16 | gc.TPTR32,
+               gc.ONE<<16 | gc.TPTR64,
+               gc.ONE<<16 | gc.TFLOAT32,
+               gc.ONE<<16 | gc.TFLOAT64:
+               a = ppc64.ABNE
+
+       case gc.OLT<<16 | gc.TINT8, // ACMP
+               gc.OLT<<16 | gc.TINT16,
+               gc.OLT<<16 | gc.TINT32,
+               gc.OLT<<16 | gc.TINT64,
+               gc.OLT<<16 | gc.TUINT8,
+               // ACMPU
+               gc.OLT<<16 | gc.TUINT16,
+               gc.OLT<<16 | gc.TUINT32,
+               gc.OLT<<16 | gc.TUINT64,
+               gc.OLT<<16 | gc.TFLOAT32,
+               // AFCMPU
+               gc.OLT<<16 | gc.TFLOAT64:
+               a = ppc64.ABLT
+
+       case gc.OLE<<16 | gc.TINT8, // ACMP
+               gc.OLE<<16 | gc.TINT16,
+               gc.OLE<<16 | gc.TINT32,
+               gc.OLE<<16 | gc.TINT64,
+               gc.OLE<<16 | gc.TUINT8,
+               // ACMPU
+               gc.OLE<<16 | gc.TUINT16,
+               gc.OLE<<16 | gc.TUINT32,
+               gc.OLE<<16 | gc.TUINT64:
+               // No OLE for floats, because it mishandles NaN.
+               // Front end must reverse comparison or use OLT and OEQ together.
+               a = ppc64.ABLE
+
+       case gc.OGT<<16 | gc.TINT8,
+               gc.OGT<<16 | gc.TINT16,
+               gc.OGT<<16 | gc.TINT32,
+               gc.OGT<<16 | gc.TINT64,
+               gc.OGT<<16 | gc.TUINT8,
+               gc.OGT<<16 | gc.TUINT16,
+               gc.OGT<<16 | gc.TUINT32,
+               gc.OGT<<16 | gc.TUINT64,
+               gc.OGT<<16 | gc.TFLOAT32,
+               gc.OGT<<16 | gc.TFLOAT64:
+               a = ppc64.ABGT
+
+       case gc.OGE<<16 | gc.TINT8,
+               gc.OGE<<16 | gc.TINT16,
+               gc.OGE<<16 | gc.TINT32,
+               gc.OGE<<16 | gc.TINT64,
+               gc.OGE<<16 | gc.TUINT8,
+               gc.OGE<<16 | gc.TUINT16,
+               gc.OGE<<16 | gc.TUINT32,
+               gc.OGE<<16 | gc.TUINT64:
+               // No OGE for floats, because it mishandles NaN.
+               // Front end must reverse comparison or use OLT and OEQ together.
+               a = ppc64.ABGE
+
+       case gc.OCMP<<16 | gc.TBOOL,
+               gc.OCMP<<16 | gc.TINT8,
+               gc.OCMP<<16 | gc.TINT16,
+               gc.OCMP<<16 | gc.TINT32,
+               gc.OCMP<<16 | gc.TPTR32,
+               gc.OCMP<<16 | gc.TINT64:
+               a = ppc64.ACMP
+
+       case gc.OCMP<<16 | gc.TUINT8,
+               gc.OCMP<<16 | gc.TUINT16,
+               gc.OCMP<<16 | gc.TUINT32,
+               gc.OCMP<<16 | gc.TUINT64,
+               gc.OCMP<<16 | gc.TPTR64:
+               a = ppc64.ACMPU
+
+       case gc.OCMP<<16 | gc.TFLOAT32,
+               gc.OCMP<<16 | gc.TFLOAT64:
+               a = ppc64.AFCMPU
+
+       case gc.OAS<<16 | gc.TBOOL,
+               gc.OAS<<16 | gc.TINT8:
+               a = ppc64.AMOVB
+
+       case gc.OAS<<16 | gc.TUINT8:
+               a = ppc64.AMOVBZ
+
+       case gc.OAS<<16 | gc.TINT16:
+               a = ppc64.AMOVH
+
+       case gc.OAS<<16 | gc.TUINT16:
+               a = ppc64.AMOVHZ
+
+       case gc.OAS<<16 | gc.TINT32:
+               a = ppc64.AMOVW
+
+       case gc.OAS<<16 | gc.TUINT32,
+               gc.OAS<<16 | gc.TPTR32:
+               a = ppc64.AMOVWZ
+
+       case gc.OAS<<16 | gc.TINT64,
+               gc.OAS<<16 | gc.TUINT64,
+               gc.OAS<<16 | gc.TPTR64:
+               a = ppc64.AMOVD
+
+       case gc.OAS<<16 | gc.TFLOAT32:
+               a = ppc64.AFMOVS
+
+       case gc.OAS<<16 | gc.TFLOAT64:
+               a = ppc64.AFMOVD
+
+       case gc.OADD<<16 | gc.TINT8,
+               gc.OADD<<16 | gc.TUINT8,
+               gc.OADD<<16 | gc.TINT16,
+               gc.OADD<<16 | gc.TUINT16,
+               gc.OADD<<16 | gc.TINT32,
+               gc.OADD<<16 | gc.TUINT32,
+               gc.OADD<<16 | gc.TPTR32,
+               gc.OADD<<16 | gc.TINT64,
+               gc.OADD<<16 | gc.TUINT64,
+               gc.OADD<<16 | gc.TPTR64:
+               a = ppc64.AADD
+
+       case gc.OADD<<16 | gc.TFLOAT32:
+               a = ppc64.AFADDS
+
+       case gc.OADD<<16 | gc.TFLOAT64:
+               a = ppc64.AFADD
+
+       case gc.OSUB<<16 | gc.TINT8,
+               gc.OSUB<<16 | gc.TUINT8,
+               gc.OSUB<<16 | gc.TINT16,
+               gc.OSUB<<16 | gc.TUINT16,
+               gc.OSUB<<16 | gc.TINT32,
+               gc.OSUB<<16 | gc.TUINT32,
+               gc.OSUB<<16 | gc.TPTR32,
+               gc.OSUB<<16 | gc.TINT64,
+               gc.OSUB<<16 | gc.TUINT64,
+               gc.OSUB<<16 | gc.TPTR64:
+               a = ppc64.ASUB
+
+       case gc.OSUB<<16 | gc.TFLOAT32:
+               a = ppc64.AFSUBS
+
+       case gc.OSUB<<16 | gc.TFLOAT64:
+               a = ppc64.AFSUB
+
+       case gc.OMINUS<<16 | gc.TINT8,
+               gc.OMINUS<<16 | gc.TUINT8,
+               gc.OMINUS<<16 | gc.TINT16,
+               gc.OMINUS<<16 | gc.TUINT16,
+               gc.OMINUS<<16 | gc.TINT32,
+               gc.OMINUS<<16 | gc.TUINT32,
+               gc.OMINUS<<16 | gc.TPTR32,
+               gc.OMINUS<<16 | gc.TINT64,
+               gc.OMINUS<<16 | gc.TUINT64,
+               gc.OMINUS<<16 | gc.TPTR64:
+               a = ppc64.ANEG
+
+       case gc.OAND<<16 | gc.TINT8,
+               gc.OAND<<16 | gc.TUINT8,
+               gc.OAND<<16 | gc.TINT16,
+               gc.OAND<<16 | gc.TUINT16,
+               gc.OAND<<16 | gc.TINT32,
+               gc.OAND<<16 | gc.TUINT32,
+               gc.OAND<<16 | gc.TPTR32,
+               gc.OAND<<16 | gc.TINT64,
+               gc.OAND<<16 | gc.TUINT64,
+               gc.OAND<<16 | gc.TPTR64:
+               a = ppc64.AAND
+
+       case gc.OOR<<16 | gc.TINT8,
+               gc.OOR<<16 | gc.TUINT8,
+               gc.OOR<<16 | gc.TINT16,
+               gc.OOR<<16 | gc.TUINT16,
+               gc.OOR<<16 | gc.TINT32,
+               gc.OOR<<16 | gc.TUINT32,
+               gc.OOR<<16 | gc.TPTR32,
+               gc.OOR<<16 | gc.TINT64,
+               gc.OOR<<16 | gc.TUINT64,
+               gc.OOR<<16 | gc.TPTR64:
+               a = ppc64.AOR
+
+       case gc.OXOR<<16 | gc.TINT8,
+               gc.OXOR<<16 | gc.TUINT8,
+               gc.OXOR<<16 | gc.TINT16,
+               gc.OXOR<<16 | gc.TUINT16,
+               gc.OXOR<<16 | gc.TINT32,
+               gc.OXOR<<16 | gc.TUINT32,
+               gc.OXOR<<16 | gc.TPTR32,
+               gc.OXOR<<16 | gc.TINT64,
+               gc.OXOR<<16 | gc.TUINT64,
+               gc.OXOR<<16 | gc.TPTR64:
+               a = ppc64.AXOR
+
+               // TODO(minux): handle rotates
+       //case CASE(OLROT, TINT8):
+       //case CASE(OLROT, TUINT8):
+       //case CASE(OLROT, TINT16):
+       //case CASE(OLROT, TUINT16):
+       //case CASE(OLROT, TINT32):
+       //case CASE(OLROT, TUINT32):
+       //case CASE(OLROT, TPTR32):
+       //case CASE(OLROT, TINT64):
+       //case CASE(OLROT, TUINT64):
+       //case CASE(OLROT, TPTR64):
+       //      a = 0//???; RLDC?
+       //      break;
+
+       case gc.OLSH<<16 | gc.TINT8,
+               gc.OLSH<<16 | gc.TUINT8,
+               gc.OLSH<<16 | gc.TINT16,
+               gc.OLSH<<16 | gc.TUINT16,
+               gc.OLSH<<16 | gc.TINT32,
+               gc.OLSH<<16 | gc.TUINT32,
+               gc.OLSH<<16 | gc.TPTR32,
+               gc.OLSH<<16 | gc.TINT64,
+               gc.OLSH<<16 | gc.TUINT64,
+               gc.OLSH<<16 | gc.TPTR64:
+               a = ppc64.ASLD
+
+       case gc.ORSH<<16 | gc.TUINT8,
+               gc.ORSH<<16 | gc.TUINT16,
+               gc.ORSH<<16 | gc.TUINT32,
+               gc.ORSH<<16 | gc.TPTR32,
+               gc.ORSH<<16 | gc.TUINT64,
+               gc.ORSH<<16 | gc.TPTR64:
+               a = ppc64.ASRD
+
+       case gc.ORSH<<16 | gc.TINT8,
+               gc.ORSH<<16 | gc.TINT16,
+               gc.ORSH<<16 | gc.TINT32,
+               gc.ORSH<<16 | gc.TINT64:
+               a = ppc64.ASRAD
+
+               // TODO(minux): handle rotates
+       //case CASE(ORROTC, TINT8):
+       //case CASE(ORROTC, TUINT8):
+       //case CASE(ORROTC, TINT16):
+       //case CASE(ORROTC, TUINT16):
+       //case CASE(ORROTC, TINT32):
+       //case CASE(ORROTC, TUINT32):
+       //case CASE(ORROTC, TINT64):
+       //case CASE(ORROTC, TUINT64):
+       //      a = 0//??? RLDC??
+       //      break;
+
+       case gc.OHMUL<<16 | gc.TINT64:
+               a = ppc64.AMULHD
+
+       case gc.OHMUL<<16 | gc.TUINT64,
+               gc.OHMUL<<16 | gc.TPTR64:
+               a = ppc64.AMULHDU
+
+       case gc.OMUL<<16 | gc.TINT8,
+               gc.OMUL<<16 | gc.TINT16,
+               gc.OMUL<<16 | gc.TINT32,
+               gc.OMUL<<16 | gc.TINT64:
+               a = ppc64.AMULLD
+
+       case gc.OMUL<<16 | gc.TUINT8,
+               gc.OMUL<<16 | gc.TUINT16,
+               gc.OMUL<<16 | gc.TUINT32,
+               gc.OMUL<<16 | gc.TPTR32,
+               // don't use word multiply; the high 32 bits are undefined.
+               gc.OMUL<<16 | gc.TUINT64,
+               gc.OMUL<<16 | gc.TPTR64:
+               // for 64-bit multiplies, signedness doesn't matter.
+               a = ppc64.AMULLD
+
+       case gc.OMUL<<16 | gc.TFLOAT32:
+               a = ppc64.AFMULS
+
+       case gc.OMUL<<16 | gc.TFLOAT64:
+               a = ppc64.AFMUL
+
+       case gc.ODIV<<16 | gc.TINT8,
+               gc.ODIV<<16 | gc.TINT16,
+               gc.ODIV<<16 | gc.TINT32,
+               gc.ODIV<<16 | gc.TINT64:
+               a = ppc64.ADIVD
+
+       case gc.ODIV<<16 | gc.TUINT8,
+               gc.ODIV<<16 | gc.TUINT16,
+               gc.ODIV<<16 | gc.TUINT32,
+               gc.ODIV<<16 | gc.TPTR32,
+               gc.ODIV<<16 | gc.TUINT64,
+               gc.ODIV<<16 | gc.TPTR64:
+               a = ppc64.ADIVDU
+
+       case gc.ODIV<<16 | gc.TFLOAT32:
+               a = ppc64.AFDIVS
+
+       case gc.ODIV<<16 | gc.TFLOAT64:
+               a = ppc64.AFDIV
+       }
+
+       return a
+}
+
+const (
+       ODynam   = 1 << 0
+       OAddable = 1 << 1
+)
+
+func xgen(n *gc.Node, a *gc.Node, o int) bool {
+       // TODO(minux)
+
+       return true
+}
+
+func sudoclean() {
+       return
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
+ * return false on failure, true on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+       // TODO(minux)
+
+       *a = obj.Addr{}
+       return false
+}
diff --git a/src/cmd/compile/internal/mips64/opt.go b/src/cmd/compile/internal/mips64/opt.go
new file mode 100644 (file)
index 0000000..1704f63
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright 2014 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+// Many Power ISA arithmetic and logical instructions come in four
+// standard variants.  These bits let us map between variants.
+const (
+       V_CC = 1 << 0 // xCC (affect CR field 0 flags)
+       V_V  = 1 << 1 // xV (affect SO and OV flags)
+)
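+
+// For example, AADD is the base form; AADDCC is its V_CC variant,
+// AADDV its V_V variant, and AADDVCC its V_CC|V_V variant (see
+// varianttable in prog.go).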
diff --git a/src/cmd/compile/internal/mips64/peep.go b/src/cmd/compile/internal/mips64/peep.go
new file mode 100644 (file)
index 0000000..9c3f1ed
--- /dev/null
@@ -0,0 +1,1051 @@
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+//     Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//     Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//     Portions Copyright © 1997-1999 Vita Nuova Limited
+//     Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//     Portions Copyright © 2004,2006 Bruce Ellis
+//     Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//     Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//     Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+       "cmd/compile/internal/gc"
+       "cmd/internal/obj"
+       "cmd/internal/obj/ppc64"
+       "fmt"
+)
+
+var gactive uint32
+
+func peep(firstp *obj.Prog) {
+       g := (*gc.Graph)(gc.Flowstart(firstp, nil))
+       if g == nil {
+               return
+       }
+       gactive = 0
+
+       var p *obj.Prog
+       var r *gc.Flow
+       var t int
+loop1:
+       if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+               gc.Dumpit("loop1", g.Start, 0)
+       }
+
+       t = 0
+       for r = g.Start; r != nil; r = r.Link {
+               p = r.Prog
+
+               // TODO(austin) Handle smaller moves.  arm and amd64
+               // distinguish between moves that *must*
+               // sign/zero extend and moves that don't care so they
+               // can eliminate moves that don't care without
+               // breaking moves that do care.  This might let us
+               // simplify or remove the next peep loop, too.
+               if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
+                       if regtyp(&p.To) {
+                               // Try to eliminate reg->reg moves
+                               if regtyp(&p.From) {
+                                       if p.From.Type == p.To.Type {
+                                               if copyprop(r) {
+                                                       excise(r)
+                                                       t++
+                                               } else if subprop(r) && copyprop(r) {
+                                                       excise(r)
+                                                       t++
+                                               }
+                                       }
+                               }
+
+                               // Convert uses of $0 to uses of R0 and
+                               // propagate R0
+                               if regzer(&p.From) != 0 {
+                                       if p.To.Type == obj.TYPE_REG {
+                                               p.From.Type = obj.TYPE_REG
+                                               p.From.Reg = ppc64.REGZERO
+                                               if copyprop(r) {
+                                                       excise(r)
+                                                       t++
+                                               } else if subprop(r) && copyprop(r) {
+                                                       excise(r)
+                                                       t++
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
+
+       if t != 0 {
+               goto loop1
+       }
+
+       /*
+        * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
+        */
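+       // The second MOV re-extends a register the first MOV already
+       // extended, so when both address the same register it is excised below.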
+       var p1 *obj.Prog
+       var r1 *gc.Flow
+       for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+               p = r.Prog
+               switch p.As {
+               default:
+                       continue
+
+               case ppc64.AMOVH,
+                       ppc64.AMOVHZ,
+                       ppc64.AMOVB,
+                       ppc64.AMOVBZ,
+                       ppc64.AMOVW,
+                       ppc64.AMOVWZ:
+                       if p.To.Type != obj.TYPE_REG {
+                               continue
+                       }
+               }
+
+               r1 = r.Link
+               if r1 == nil {
+                       continue
+               }
+               p1 = r1.Prog
+               if p1.As != p.As {
+                       continue
+               }
+               if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
+                       continue
+               }
+               if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
+                       continue
+               }
+               excise(r1)
+       }
+
+       if gc.Debug['D'] > 1 {
+               goto ret /* allow following code improvement to be suppressed */
+       }
+
+       /*
+        * look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
+        * when OP can set condition codes correctly
+        */
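+       // e.g., ADD R3,R4,R5; CMP R5,$0; BEQ ... becomes
+       // ADDCC R3,R4,R5; BEQ ... and the CMP is excised.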
+       for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+               p = r.Prog
+               switch p.As {
+               case ppc64.ACMP,
+                       ppc64.ACMPW: /* always safe? */
+                       if regzer(&p.To) == 0 {
+                               continue
+                       }
+                       r1 = r.S1
+                       if r1 == nil {
+                               continue
+                       }
+                       switch r1.Prog.As {
+                       default:
+                               continue
+
+                               /* the conditions can be complex and these are currently little used */
+                       case ppc64.ABCL,
+                               ppc64.ABC:
+                               continue
+
+                       case ppc64.ABEQ,
+                               ppc64.ABGE,
+                               ppc64.ABGT,
+                               ppc64.ABLE,
+                               ppc64.ABLT,
+                               ppc64.ABNE,
+                               ppc64.ABVC,
+                               ppc64.ABVS:
+                               break
+                       }
+
+                       r1 = r
+                       for {
+                               r1 = gc.Uniqp(r1)
+                               if r1 == nil || r1.Prog.As != obj.ANOP {
+                                       break
+                               }
+                       }
+
+                       if r1 == nil {
+                               continue
+                       }
+                       p1 = r1.Prog
+                       if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.From.Reg {
+                               continue
+                       }
+                       switch p1.As {
+                       /* irregular instructions */
+                       case ppc64.ASUB,
+                               ppc64.AADD,
+                               ppc64.AXOR,
+                               ppc64.AOR:
+                               if p1.From.Type == obj.TYPE_CONST || p1.From.Type == obj.TYPE_ADDR {
+                                       continue
+                               }
+                       }
+
+                       switch p1.As {
+                       default:
+                               continue
+
+                       case ppc64.AMOVW,
+                               ppc64.AMOVD:
+                               if p1.From.Type != obj.TYPE_REG {
+                                       continue
+                               }
+                               continue
+
+                       case ppc64.AANDCC,
+                               ppc64.AANDNCC,
+                               ppc64.AORCC,
+                               ppc64.AORNCC,
+                               ppc64.AXORCC,
+                               ppc64.ASUBCC,
+                               ppc64.ASUBECC,
+                               ppc64.ASUBMECC,
+                               ppc64.ASUBZECC,
+                               ppc64.AADDCC,
+                               ppc64.AADDCCC,
+                               ppc64.AADDECC,
+                               ppc64.AADDMECC,
+                               ppc64.AADDZECC,
+                               ppc64.ARLWMICC,
+                               ppc64.ARLWNMCC,
+                               /* don't deal with floating point instructions for now */
+                               /*
+                                       case AFABS:
+                                       case AFADD:
+                                       case AFADDS:
+                                       case AFCTIW:
+                                       case AFCTIWZ:
+                                       case AFDIV:
+                                       case AFDIVS:
+                                       case AFMADD:
+                                       case AFMADDS:
+                                       case AFMOVD:
+                                       case AFMSUB:
+                                       case AFMSUBS:
+                                       case AFMUL:
+                                       case AFMULS:
+                                       case AFNABS:
+                                       case AFNEG:
+                                       case AFNMADD:
+                                       case AFNMADDS:
+                                       case AFNMSUB:
+                                       case AFNMSUBS:
+                                       case AFRSP:
+                                       case AFSUB:
+                                       case AFSUBS:
+                                       case ACNTLZW:
+                                       case AMTFSB0:
+                                       case AMTFSB1:
+                               */
+                               ppc64.AADD,
+                               ppc64.AADDV,
+                               ppc64.AADDC,
+                               ppc64.AADDCV,
+                               ppc64.AADDME,
+                               ppc64.AADDMEV,
+                               ppc64.AADDE,
+                               ppc64.AADDEV,
+                               ppc64.AADDZE,
+                               ppc64.AADDZEV,
+                               ppc64.AAND,
+                               ppc64.AANDN,
+                               ppc64.ADIVW,
+                               ppc64.ADIVWV,
+                               ppc64.ADIVWU,
+                               ppc64.ADIVWUV,
+                               ppc64.ADIVD,
+                               ppc64.ADIVDV,
+                               ppc64.ADIVDU,
+                               ppc64.ADIVDUV,
+                               ppc64.AEQV,
+                               ppc64.AEXTSB,
+                               ppc64.AEXTSH,
+                               ppc64.AEXTSW,
+                               ppc64.AMULHW,
+                               ppc64.AMULHWU,
+                               ppc64.AMULLW,
+                               ppc64.AMULLWV,
+                               ppc64.AMULHD,
+                               ppc64.AMULHDU,
+                               ppc64.AMULLD,
+                               ppc64.AMULLDV,
+                               ppc64.ANAND,
+                               ppc64.ANEG,
+                               ppc64.ANEGV,
+                               ppc64.ANOR,
+                               ppc64.AOR,
+                               ppc64.AORN,
+                               ppc64.AREM,
+                               ppc64.AREMV,
+                               ppc64.AREMU,
+                               ppc64.AREMUV,
+                               ppc64.AREMD,
+                               ppc64.AREMDV,
+                               ppc64.AREMDU,
+                               ppc64.AREMDUV,
+                               ppc64.ARLWMI,
+                               ppc64.ARLWNM,
+                               ppc64.ASLW,
+                               ppc64.ASRAW,
+                               ppc64.ASRW,
+                               ppc64.ASLD,
+                               ppc64.ASRAD,
+                               ppc64.ASRD,
+                               ppc64.ASUB,
+                               ppc64.ASUBV,
+                               ppc64.ASUBC,
+                               ppc64.ASUBCV,
+                               ppc64.ASUBME,
+                               ppc64.ASUBMEV,
+                               ppc64.ASUBE,
+                               ppc64.ASUBEV,
+                               ppc64.ASUBZE,
+                               ppc64.ASUBZEV,
+                               ppc64.AXOR:
+                               t = variant2as(int(p1.As), as2variant(int(p1.As))|V_CC)
+                       }
+
+                       if gc.Debug['D'] != 0 {
+                               fmt.Printf("cmp %v; %v -> ", p1, p)
+                       }
+                       p1.As = int16(t)
+                       if gc.Debug['D'] != 0 {
+                               fmt.Printf("%v\n", p1)
+                       }
+                       excise(r)
+                       continue
+               }
+       }
+
+ret:
+       gc.Flowend(g)
+}
+
+func excise(r *gc.Flow) {
+       p := (*obj.Prog)(r.Prog)
+       if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+               fmt.Printf("%v ===delete===\n", p)
+       }
+       obj.Nopout(p)
+       gc.Ostats.Ndelmov++
+}
+
+/*
+ * regzer returns 1 if a's value is 0 (a is R0 or $0)
+ */
+func regzer(a *obj.Addr) int {
+       if a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_ADDR {
+               if a.Sym == nil && a.Reg == 0 {
+                       if a.Offset == 0 {
+                               return 1
+                       }
+               }
+       }
+       if a.Type == obj.TYPE_REG {
+               if a.Reg == ppc64.REGZERO {
+                       return 1
+               }
+       }
+       return 0
+}
+
+func regtyp(a *obj.Addr) bool {
+       // TODO(rsc): Floating point register exclusions?
+       return a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ *     MOV     a, R1
+ *     ADD     b, R1   / no use of R2
+ *     MOV     R1, R2
+ * would be converted to
+ *     MOV     a, R2
+ *     ADD     b, R2
+ *     MOV     R2, R1
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ *
+ * r0 (the argument, not the register) is the MOV at the end of the
+ * above sequences.  This returns 1 if it modified any instructions.
+ */
+func subprop(r0 *gc.Flow) bool {
+       p := (*obj.Prog)(r0.Prog)
+       v1 := (*obj.Addr)(&p.From)
+       if !regtyp(v1) {
+               return false
+       }
+       v2 := (*obj.Addr)(&p.To)
+       if !regtyp(v2) {
+               return false
+       }
+       for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+               if gc.Uniqs(r) == nil {
+                       break
+               }
+               p = r.Prog
+               if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+                       continue
+               }
+               if p.Info.Flags&gc.Call != 0 {
+                       return false
+               }
+
+               if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
+                       if p.To.Type == v1.Type {
+                               if p.To.Reg == v1.Reg {
+                                       copysub(&p.To, v1, v2, 1)
+                                       if gc.Debug['P'] != 0 {
+                                               fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+                                               if p.From.Type == v2.Type {
+                                                       fmt.Printf(" excise")
+                                               }
+                                               fmt.Printf("\n")
+                                       }
+
+                                       for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+                                               p = r.Prog
+                                               copysub(&p.From, v1, v2, 1)
+                                               copysub1(p, v1, v2, 1)
+                                               copysub(&p.To, v1, v2, 1)
+                                               if gc.Debug['P'] != 0 {
+                                                       fmt.Printf("%v\n", r.Prog)
+                                               }
+                                       }
+
+                                       t := int(v1.Reg)
+                                       v1.Reg = v2.Reg
+                                       v2.Reg = int16(t)
+                                       if gc.Debug['P'] != 0 {
+                                               fmt.Printf("%v last\n", r.Prog)
+                                       }
+                                       return true
+                               }
+                       }
+               }
+
+               if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
+                       break
+               }
+               if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+                       break
+               }
+       }
+
+       return false
+}
+
+/*
+ * The idea is to remove redundant copies.
+ *     v1->v2  F=0
+ *     (use v2 s/v2/v1/)*
+ *     set v1  F=1
+ *     use v2  return fail (v1->v2 move must remain)
+ *     -----------------
+ *     v1->v2  F=0
+ *     (use v2 s/v2/v1/)*
+ *     set v1  F=1
+ *     set v2  return success (caller can remove v1->v2 move)
+ *     set v2  return success (caller can remove v1->v2 move)
+ */
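+//
+// For example (a sketch): given
+//     MOVD R1, R2
+//     ADD  R2, R3, R4
+// copyprop rewrites the ADD to use R1; if every later use of R2 can be
+// rewritten the same way, the caller deletes the MOVD.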
+func copyprop(r0 *gc.Flow) bool {
+       p := (*obj.Prog)(r0.Prog)
+       v1 := (*obj.Addr)(&p.From)
+       v2 := (*obj.Addr)(&p.To)
+       if copyas(v1, v2) {
+               if gc.Debug['P'] != 0 {
+                       fmt.Printf("eliminating self-move: %v\n", r0.Prog)
+               }
+               return true
+       }
+
+       gactive++
+       if gc.Debug['P'] != 0 {
+               fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
+       }
+       return copy1(v1, v2, r0.S1, 0)
+}
+
+// copy1 replaces uses of v2 with v1 starting at r and returns true if
+// all uses were rewritten.
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
+       if uint32(r.Active) == gactive {
+               if gc.Debug['P'] != 0 {
+                       fmt.Printf("act set; return 1\n")
+               }
+               return true
+       }
+
+       r.Active = int32(gactive)
+       if gc.Debug['P'] != 0 {
+               fmt.Printf("copy1 replace %v with %v f=%d\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
+       }
+       var t int
+       var p *obj.Prog
+       for ; r != nil; r = r.S1 {
+               p = r.Prog
+               if gc.Debug['P'] != 0 {
+                       fmt.Printf("%v", p)
+               }
+               if f == 0 && gc.Uniqp(r) == nil {
+                       // Multiple predecessors; conservatively
+                       // assume v1 was set on other path
+                       f = 1
+
+                       if gc.Debug['P'] != 0 {
+                               fmt.Printf("; merge; f=%d", f)
+                       }
+               }
+
+               t = copyu(p, v2, nil)
+               switch t {
+               case 2: /* rar, can't split */
+                       if gc.Debug['P'] != 0 {
+                               fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+                       }
+                       return false
+
+               case 3: /* set */
+                       if gc.Debug['P'] != 0 {
+                               fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+                       }
+                       return true
+
+               case 1, /* used, substitute */
+                       4: /* use and set */
+                       if f != 0 {
+                               if gc.Debug['P'] == 0 {
+                                       return false
+                               }
+                               if t == 4 {
+                                       fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+                               } else {
+                                       fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+                               }
+                               return false
+                       }
+
+                       if copyu(p, v2, v1) != 0 {
+                               if gc.Debug['P'] != 0 {
+                                       fmt.Printf("; sub fail; return 0\n")
+                               }
+                               return false
+                       }
+
+                       if gc.Debug['P'] != 0 {
+                               fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
+                       }
+                       if t == 4 {
+                               if gc.Debug['P'] != 0 {
+                                       fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+                               }
+                               return true
+                       }
+               }
+
+               if f == 0 {
+                       t = copyu(p, v1, nil)
+                       if f == 0 && (t == 2 || t == 3 || t == 4) {
+                               f = 1
+                               if gc.Debug['P'] != 0 {
+                                       fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+                               }
+                       }
+               }
+
+               if gc.Debug['P'] != 0 {
+                       fmt.Printf("\n")
+               }
+               if r.S2 != nil {
+                       if !copy1(v1, v2, r.S2, f) {
+                               return false
+                       }
+               }
+       }
+
+       return true
+}
+
+// If s==nil, copyu returns the set/use of v in p; otherwise, it
+// modifies p to replace reads of v with reads of s and returns 0 for
+// success or non-zero for failure.
+//
+// If s==nil, copyu returns one of the following values:
+//     1 if v only used
+//     2 if v is set and used in one address (read-alter-rewrite;
+//       can't substitute)
+//     3 if v is only set
+//     4 if v is set in one address and used in another (so addresses
+//       can be rewritten independently)
+//     0 otherwise (not touched)
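+//
+// For example (a sketch, for ADD R3,R4,R5, which reads R3 and R4 and
+// writes R5): copyu(p, R3, nil) == 1, copyu(p, R5, nil) == 3, and
+// copyu(p, R3, R6) rewrites the read of R3 to R6 and returns 0.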
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+       if p.From3Type() != obj.TYPE_NONE {
+               // 9g never generates a from3
+               fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
+       }
+
+       switch p.As {
+       default:
+               fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+               return 2
+
+       case obj.ANOP, /* read p->from, write p->to */
+               ppc64.AMOVH,
+               ppc64.AMOVHZ,
+               ppc64.AMOVB,
+               ppc64.AMOVBZ,
+               ppc64.AMOVW,
+               ppc64.AMOVWZ,
+               ppc64.AMOVD,
+               ppc64.ANEG,
+               ppc64.ANEGCC,
+               ppc64.AADDME,
+               ppc64.AADDMECC,
+               ppc64.AADDZE,
+               ppc64.AADDZECC,
+               ppc64.ASUBME,
+               ppc64.ASUBMECC,
+               ppc64.ASUBZE,
+               ppc64.ASUBZECC,
+               ppc64.AFCTIW,
+               ppc64.AFCTIWZ,
+               ppc64.AFCTID,
+               ppc64.AFCTIDZ,
+               ppc64.AFCFID,
+               ppc64.AFCFIDCC,
+               ppc64.AFMOVS,
+               ppc64.AFMOVD,
+               ppc64.AFRSP,
+               ppc64.AFNEG,
+               ppc64.AFNEGCC:
+               if s != nil {
+                       if copysub(&p.From, v, s, 1) != 0 {
+                               return 1
+                       }
+
+                       // Update only indirect uses of v in p->to
+                       if !copyas(&p.To, v) {
+                               if copysub(&p.To, v, s, 1) != 0 {
+                                       return 1
+                               }
+                       }
+                       return 0
+               }
+
+               if copyas(&p.To, v) {
+                       // Fix up implicit from
+                       if p.From.Type == obj.TYPE_NONE {
+                               p.From = p.To
+                       }
+                       if copyau(&p.From, v) {
+                               return 4
+                       }
+                       return 3
+               }
+
+               if copyau(&p.From, v) {
+                       return 1
+               }
+               if copyau(&p.To, v) {
+                       // p->to only indirectly uses v
+                       return 1
+               }
+
+               return 0
+
+       case ppc64.AMOVBU, /* rar p->from, write p->to or read p->from, rar p->to */
+               ppc64.AMOVBZU,
+               ppc64.AMOVHU,
+               ppc64.AMOVHZU,
+               ppc64.AMOVWZU,
+               ppc64.AMOVDU:
+               if p.From.Type == obj.TYPE_MEM {
+                       if copyas(&p.From, v) {
+                               // No s!=nil check; need to fail
+                               // anyway in that case
+                               return 2
+                       }
+
+                       if s != nil {
+                               if copysub(&p.To, v, s, 1) != 0 {
+                                       return 1
+                               }
+                               return 0
+                       }
+
+                       if copyas(&p.To, v) {
+                               return 3
+                       }
+               } else if p.To.Type == obj.TYPE_MEM {
+                       if copyas(&p.To, v) {
+                               return 2
+                       }
+                       if s != nil {
+                               if copysub(&p.From, v, s, 1) != 0 {
+                                       return 1
+                               }
+                               return 0
+                       }
+
+                       if copyau(&p.From, v) {
+                               return 1
+                       }
+               } else {
+                       fmt.Printf("copyu: bad %v\n", p)
+               }
+
+               return 0
+
+       case ppc64.ARLWMI, /* read p->from, read p->reg, rar p->to */
+               ppc64.ARLWMICC:
+               if copyas(&p.To, v) {
+                       return 2
+               }
+               fallthrough
+
+       case ppc64.AADD,
+               /* read p->from, read p->reg, write p->to */
+               ppc64.AADDC,
+               ppc64.AADDE,
+               ppc64.ASUB,
+               ppc64.ASLW,
+               ppc64.ASRW,
+               ppc64.ASRAW,
+               ppc64.ASLD,
+               ppc64.ASRD,
+               ppc64.ASRAD,
+               ppc64.AOR,
+               ppc64.AORCC,
+               ppc64.AORN,
+               ppc64.AORNCC,
+               ppc64.AAND,
+               ppc64.AANDCC,
+               ppc64.AANDN,
+               ppc64.AANDNCC,
+               ppc64.ANAND,
+               ppc64.ANANDCC,
+               ppc64.ANOR,
+               ppc64.ANORCC,
+               ppc64.AXOR,
+               ppc64.AMULHW,
+               ppc64.AMULHWU,
+               ppc64.AMULLW,
+               ppc64.AMULLD,
+               ppc64.ADIVW,
+               ppc64.ADIVD,
+               ppc64.ADIVWU,
+               ppc64.ADIVDU,
+               ppc64.AREM,
+               ppc64.AREMU,
+               ppc64.AREMD,
+               ppc64.AREMDU,
+               ppc64.ARLWNM,
+               ppc64.ARLWNMCC,
+               ppc64.AFADDS,
+               ppc64.AFADD,
+               ppc64.AFSUBS,
+               ppc64.AFSUB,
+               ppc64.AFMULS,
+               ppc64.AFMUL,
+               ppc64.AFDIVS,
+               ppc64.AFDIV:
+               if s != nil {
+                       if copysub(&p.From, v, s, 1) != 0 {
+                               return 1
+                       }
+                       if copysub1(p, v, s, 1) != 0 {
+                               return 1
+                       }
+
+                       // Update only indirect uses of v in p->to
+                       if !copyas(&p.To, v) {
+                               if copysub(&p.To, v, s, 1) != 0 {
+                                       return 1
+                               }
+                       }
+                       return 0
+               }
+
+               if copyas(&p.To, v) {
+                       if p.Reg == 0 {
+                               // Fix up implicit reg (e.g., ADD
+                               // R3,R4 -> ADD R3,R4,R4) so we can
+                               // update reg and to separately.
+                               p.Reg = p.To.Reg
+                       }
+
+                       if copyau(&p.From, v) {
+                               return 4
+                       }
+                       if copyau1(p, v) {
+                               return 4
+                       }
+                       return 3
+               }
+
+               if copyau(&p.From, v) {
+                       return 1
+               }
+               if copyau1(p, v) {
+                       return 1
+               }
+               if copyau(&p.To, v) {
+                       return 1
+               }
+               return 0
+
+       case ppc64.ABEQ,
+               ppc64.ABGT,
+               ppc64.ABGE,
+               ppc64.ABLT,
+               ppc64.ABLE,
+               ppc64.ABNE,
+               ppc64.ABVC,
+               ppc64.ABVS:
+               return 0
+
+       case obj.ACHECKNIL, /* read p->from */
+               ppc64.ACMP, /* read p->from, read p->to */
+               ppc64.ACMPU,
+               ppc64.ACMPW,
+               ppc64.ACMPWU,
+               ppc64.AFCMPO,
+               ppc64.AFCMPU:
+               if s != nil {
+                       if copysub(&p.From, v, s, 1) != 0 {
+                               return 1
+                       }
+                       return copysub(&p.To, v, s, 1)
+               }
+
+               if copyau(&p.From, v) {
+                       return 1
+               }
+               if copyau(&p.To, v) {
+                       return 1
+               }
+               return 0
+
+               // 9g never generates a branch to a GPR (this isn't
+       // even a normal instruction; liblink turns it into a
+       // mov and a branch).
+       case ppc64.ABR: /* read p->to */
+               if s != nil {
+                       if copysub(&p.To, v, s, 1) != 0 {
+                               return 1
+                       }
+                       return 0
+               }
+
+               if copyau(&p.To, v) {
+                       return 1
+               }
+               return 0
+
+       case obj.ARET: /* funny */
+               if s != nil {
+                       return 0
+               }
+
+               // All registers die at this point, so claim
+               // everything is set (and not used).
+               return 3
+
+       case ppc64.ABL: /* funny */
+               if v.Type == obj.TYPE_REG {
+                       // TODO(rsc): REG_R0 and REG_F0 used to be
+                       // (when register numbers started at 0) exregoffset and exfregoffset,
+                       // which are unset entirely.
+                       // It's strange that this handles R0 and F0 differently from the other
+                       // registers. Possible failure to optimize?
+                       if ppc64.REG_R0 < v.Reg && v.Reg <= ppc64.REGEXT {
+                               return 2
+                       }
+                       if v.Reg == ppc64.REGARG {
+                               return 2
+                       }
+                       if ppc64.REG_F0 < v.Reg && v.Reg <= ppc64.FREGEXT {
+                               return 2
+                       }
+               }
+
+               if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
+                       return 2
+               }
+
+               if s != nil {
+                       if copysub(&p.To, v, s, 1) != 0 {
+                               return 1
+                       }
+                       return 0
+               }
+
+               if copyau(&p.To, v) {
+                       return 4
+               }
+               return 3
+
+               // R0 is zero, used by DUFFZERO, cannot be substituted.
+       // R3 is ptr to memory, used and set, cannot be substituted.
+       case obj.ADUFFZERO:
+               if v.Type == obj.TYPE_REG {
+                       if v.Reg == 0 {
+                               return 1
+                       }
+                       if v.Reg == 3 {
+                               return 2
+                       }
+               }
+
+               return 0
+
+               // R3, R4 are ptr to src, dst, used and set, cannot be substituted.
+       // R5 is scratch, set by DUFFCOPY, cannot be substituted.
+       case obj.ADUFFCOPY:
+               if v.Type == obj.TYPE_REG {
+                       if v.Reg == 3 || v.Reg == 4 {
+                               return 2
+                       }
+                       if v.Reg == 5 {
+                               return 3
+                       }
+               }
+
+               return 0
+
+       case obj.ATEXT: /* funny */
+               if v.Type == obj.TYPE_REG {
+                       if v.Reg == ppc64.REGARG {
+                               return 3
+                       }
+               }
+               return 0
+
+       case obj.APCDATA,
+               obj.AFUNCDATA,
+               obj.AVARDEF,
+               obj.AVARKILL:
+               return 0
+       }
+}
+
+// copyas returns true if a and v address the same register.
+//
+// If a is the from operand, this means this operation reads the
+// register in v.  If a is the to operand, this means this operation
+// writes the register in v.
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+       if regtyp(v) {
+               if a.Type == v.Type {
+                       if a.Reg == v.Reg {
+                               return true
+                       }
+               }
+       }
+       return false
+}
+
+// copyau returns true if a either directly or indirectly addresses the
+// same register as v.
+//
+// If a is the from operand, this means this operation reads the
+// register in v.  If a is the to operand, this means the operation
+// either reads or writes the register in v (if !copyas(a, v), then
+// the operation reads the register in v).
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+       if copyas(a, v) {
+               return true
+       }
+       if v.Type == obj.TYPE_REG {
+               if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
+                       if v.Reg == a.Reg {
+                               return true
+                       }
+               }
+       }
+       return false
+}
+
+// copyau1 returns true if p->reg references the same register as v and v
+// is a direct reference.
+func copyau1(p *obj.Prog, v *obj.Addr) bool {
+       if regtyp(v) && v.Reg != 0 {
+               if p.Reg == v.Reg {
+                       return true
+               }
+       }
+       return false
+}
+
+// copysub replaces v with s in a if f!=0, or reports whether it could if f==0.
+// It would return 1 on failure to substitute, but substitution always
+// succeeds on ppc64, so it always returns 0.
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+       if f != 0 {
+               if copyau(a, v) {
+                       a.Reg = s.Reg
+               }
+       }
+       return 0
+}
+
+// copysub1 replaces v with s in p1->reg if f!=0, or reports whether it
+// could if f==0. It would return 1 on failure to substitute, but
+// substitution always succeeds on ppc64, so it always returns 0.
+func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
+       if f != 0 {
+               if copyau1(p1, v) {
+                       p1.Reg = s.Reg
+               }
+       }
+       return 0
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
+       if a.Type != v.Type {
+               return false
+       }
+       if regtyp(v) && a.Reg == v.Reg {
+               return true
+       }
+       if v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM {
+               if v.Offset == a.Offset {
+                       return true
+               }
+       }
+       return false
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+       return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
+}
+
+func stackaddr(a *obj.Addr) bool {
+       return a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP
+}
diff --git a/src/cmd/compile/internal/mips64/prog.go b/src/cmd/compile/internal/mips64/prog.go
new file mode 100644 (file)
index 0000000..9b8719b
--- /dev/null
@@ -0,0 +1,314 @@
+// Copyright 2014 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+       "cmd/compile/internal/gc"
+       "cmd/internal/obj"
+       "cmd/internal/obj/ppc64"
+)
+
+const (
+       LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
+       RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about the instructions
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
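+//
+// For example, the ppc64.AADD entry below says ADD reads its left and
+// reg operands and writes its right operand, on 64-bit (SizeQ) values.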
+var progtable = [ppc64.ALAST]obj.ProgInfo{
+       obj.ATYPE:     {Flags: gc.Pseudo | gc.Skip},
+       obj.ATEXT:     {Flags: gc.Pseudo},
+       obj.AFUNCDATA: {Flags: gc.Pseudo},
+       obj.APCDATA:   {Flags: gc.Pseudo},
+       obj.AUNDEF:    {Flags: gc.Break},
+       obj.AUSEFIELD: {Flags: gc.OK},
+       obj.ACHECKNIL: {Flags: gc.LeftRead},
+       obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
+       obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
+
+       // NOP is an internal no-op that also stands
+       // for USED and SET annotations, not the Power opcode.
+       obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite},
+
+       // Integer
+       ppc64.AADD:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.ASUB:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.ANEG:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AAND:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AOR:     {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AXOR:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AMULLD:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AMULLW:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AMULHD:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AMULHDU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.ADIVD:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.ADIVDU:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.ASLD:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.ASRD:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.ASRAD:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.ACMP:    {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
+       ppc64.ACMPU:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
+       ppc64.ATD:     {Flags: gc.SizeQ | gc.RightRead},
+
+       // Floating point.
+       ppc64.AFADD:   {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AFADDS:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AFSUB:   {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AFSUBS:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AFMUL:   {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AFMULS:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AFDIV:   {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AFDIVS:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AFCTIDZ: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AFCFID:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       ppc64.AFCMPU:  {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
+       ppc64.AFRSP:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+
+       // Moves
+       ppc64.AMOVB:  {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+       ppc64.AMOVBU: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
+       ppc64.AMOVBZ: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+       ppc64.AMOVH:  {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+       ppc64.AMOVHU: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
+       ppc64.AMOVHZ: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+       ppc64.AMOVW:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+
+       // there is no AMOVWU.
+       ppc64.AMOVWZU: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
+       ppc64.AMOVWZ:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+       ppc64.AMOVD:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
+       ppc64.AMOVDU:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move | gc.PostInc},
+       ppc64.AFMOVS:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+       ppc64.AFMOVD:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
+
+       // Jumps
+       ppc64.ABR:     {Flags: gc.Jump | gc.Break},
+       ppc64.ABL:     {Flags: gc.Call},
+       ppc64.ABEQ:    {Flags: gc.Cjmp},
+       ppc64.ABNE:    {Flags: gc.Cjmp},
+       ppc64.ABGE:    {Flags: gc.Cjmp},
+       ppc64.ABLT:    {Flags: gc.Cjmp},
+       ppc64.ABGT:    {Flags: gc.Cjmp},
+       ppc64.ABLE:    {Flags: gc.Cjmp},
+       obj.ARET:      {Flags: gc.Break},
+       obj.ADUFFZERO: {Flags: gc.Call},
+       obj.ADUFFCOPY: {Flags: gc.Call},
+}
+
+var initproginfo_initialized int
+
+func initproginfo() {
+       var addvariant = []int{V_CC, V_V, V_CC | V_V}
+
+       if initproginfo_initialized != 0 {
+               return
+       }
+       initproginfo_initialized = 1
+
+       // Perform one-time expansion of instructions in progtable to
+       // their CC, V, and VCC variants
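+       // (e.g., the AADD entry also populates AADDCC, AADDV, and AADDVCC).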
+       var as2 int
+       var i int
+       var variant int
+       for as := int(0); as < len(progtable); as++ {
+               if progtable[as].Flags == 0 {
+                       continue
+               }
+               variant = as2variant(as)
+               for i = 0; i < len(addvariant); i++ {
+                       as2 = variant2as(as, variant|addvariant[i])
+                       if as2 != 0 && progtable[as2].Flags == 0 {
+                               progtable[as2] = progtable[as]
+                       }
+               }
+       }
+}
+
+func proginfo(p *obj.Prog) {
+       initproginfo()
+
+       info := &p.Info
+       *info = progtable[p.As]
+       if info.Flags == 0 {
+               gc.Fatalf("proginfo: unknown instruction %v", p)
+       }
+
+       if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
+               info.Flags &^= gc.RegRead
+               info.Flags |= gc.RightRead /*CanRegRead |*/
+       }
+
+       if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
+               info.Regindex |= RtoB(int(p.From.Reg))
+               if info.Flags&gc.PostInc != 0 {
+                       info.Regset |= RtoB(int(p.From.Reg))
+               }
+       }
+
+       if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
+               info.Regindex |= RtoB(int(p.To.Reg))
+               if info.Flags&gc.PostInc != 0 {
+                       info.Regset |= RtoB(int(p.To.Reg))
+               }
+       }
+
+       if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
+               info.Flags &^= gc.LeftRead
+               info.Flags |= gc.LeftAddr
+       }
+
+       if p.As == obj.ADUFFZERO {
+               info.Reguse |= 1<<0 | RtoB(ppc64.REG_R3)
+               info.Regset |= RtoB(ppc64.REG_R3)
+       }
+
+       if p.As == obj.ADUFFCOPY {
+               // TODO(austin) Revisit when duffcopy is implemented
+               info.Reguse |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4) | RtoB(ppc64.REG_R5)
+
+               info.Regset |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4)
+       }
+}
+
+// Instruction variants table.  Initially this contains entries only
+// for the "base" form of each instruction.  On the first call to
+// as2variant or variant2as, we'll add the variants to the table.
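+// Each row is indexed [base, CC, V, VCC]; a zero entry means that
+// variant does not exist.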
+var varianttable = [ppc64.ALAST][4]int{
+       ppc64.AADD:     [4]int{ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
+       ppc64.AADDC:    [4]int{ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
+       ppc64.AADDE:    [4]int{ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
+       ppc64.AADDME:   [4]int{ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC},
+       ppc64.AADDZE:   [4]int{ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC},
+       ppc64.AAND:     [4]int{ppc64.AAND, ppc64.AANDCC, 0, 0},
+       ppc64.AANDN:    [4]int{ppc64.AANDN, ppc64.AANDNCC, 0, 0},
+       ppc64.ACNTLZD:  [4]int{ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0},
+       ppc64.ACNTLZW:  [4]int{ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0},
+       ppc64.ADIVD:    [4]int{ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC},
+       ppc64.ADIVDU:   [4]int{ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC},
+       ppc64.ADIVW:    [4]int{ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC},
+       ppc64.ADIVWU:   [4]int{ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC},
+       ppc64.AEQV:     [4]int{ppc64.AEQV, ppc64.AEQVCC, 0, 0},
+       ppc64.AEXTSB:   [4]int{ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0},
+       ppc64.AEXTSH:   [4]int{ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0},
+       ppc64.AEXTSW:   [4]int{ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0},
+       ppc64.AFABS:    [4]int{ppc64.AFABS, ppc64.AFABSCC, 0, 0},
+       ppc64.AFADD:    [4]int{ppc64.AFADD, ppc64.AFADDCC, 0, 0},
+       ppc64.AFADDS:   [4]int{ppc64.AFADDS, ppc64.AFADDSCC, 0, 0},
+       ppc64.AFCFID:   [4]int{ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0},
+       ppc64.AFCTID:   [4]int{ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0},
+       ppc64.AFCTIDZ:  [4]int{ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0},
+       ppc64.AFCTIW:   [4]int{ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0},
+       ppc64.AFCTIWZ:  [4]int{ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0},
+       ppc64.AFDIV:    [4]int{ppc64.AFDIV, ppc64.AFDIVCC, 0, 0},
+       ppc64.AFDIVS:   [4]int{ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0},
+       ppc64.AFMADD:   [4]int{ppc64.AFMADD, ppc64.AFMADDCC, 0, 0},
+       ppc64.AFMADDS:  [4]int{ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0},
+       ppc64.AFMOVD:   [4]int{ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0},
+       ppc64.AFMSUB:   [4]int{ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0},
+       ppc64.AFMSUBS:  [4]int{ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0},
+       ppc64.AFMUL:    [4]int{ppc64.AFMUL, ppc64.AFMULCC, 0, 0},
+       ppc64.AFMULS:   [4]int{ppc64.AFMULS, ppc64.AFMULSCC, 0, 0},
+       ppc64.AFNABS:   [4]int{ppc64.AFNABS, ppc64.AFNABSCC, 0, 0},
+       ppc64.AFNEG:    [4]int{ppc64.AFNEG, ppc64.AFNEGCC, 0, 0},
+       ppc64.AFNMADD:  [4]int{ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0},
+       ppc64.AFNMADDS: [4]int{ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0},
+       ppc64.AFNMSUB:  [4]int{ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0},
+       ppc64.AFNMSUBS: [4]int{ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0},
+       ppc64.AFRES:    [4]int{ppc64.AFRES, ppc64.AFRESCC, 0, 0},
+       ppc64.AFRSP:    [4]int{ppc64.AFRSP, ppc64.AFRSPCC, 0, 0},
+       ppc64.AFRSQRTE: [4]int{ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0},
+       ppc64.AFSEL:    [4]int{ppc64.AFSEL, ppc64.AFSELCC, 0, 0},
+       ppc64.AFSQRT:   [4]int{ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0},
+       ppc64.AFSQRTS:  [4]int{ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0},
+       ppc64.AFSUB:    [4]int{ppc64.AFSUB, ppc64.AFSUBCC, 0, 0},
+       ppc64.AFSUBS:   [4]int{ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0},
+       ppc64.AMTFSB0:  [4]int{ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0},
+       ppc64.AMTFSB1:  [4]int{ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0},
+       ppc64.AMULHD:   [4]int{ppc64.AMULHD, ppc64.AMULHDCC, 0, 0},
+       ppc64.AMULHDU:  [4]int{ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0},
+       ppc64.AMULHW:   [4]int{ppc64.AMULHW, ppc64.AMULHWCC, 0, 0},
+       ppc64.AMULHWU:  [4]int{ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0},
+       ppc64.AMULLD:   [4]int{ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC},
+       ppc64.AMULLW:   [4]int{ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC},
+       ppc64.ANAND:    [4]int{ppc64.ANAND, ppc64.ANANDCC, 0, 0},
+       ppc64.ANEG:     [4]int{ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC},
+       ppc64.ANOR:     [4]int{ppc64.ANOR, ppc64.ANORCC, 0, 0},
+       ppc64.AOR:      [4]int{ppc64.AOR, ppc64.AORCC, 0, 0},
+       ppc64.AORN:     [4]int{ppc64.AORN, ppc64.AORNCC, 0, 0},
+       ppc64.AREM:     [4]int{ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC},
+       ppc64.AREMD:    [4]int{ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC},
+       ppc64.AREMDU:   [4]int{ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC},
+       ppc64.AREMU:    [4]int{ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC},
+       ppc64.ARLDC:    [4]int{ppc64.ARLDC, ppc64.ARLDCCC, 0, 0},
+       ppc64.ARLDCL:   [4]int{ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0},
+       ppc64.ARLDCR:   [4]int{ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0},
+       ppc64.ARLDMI:   [4]int{ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0},
+       ppc64.ARLWMI:   [4]int{ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0},
+       ppc64.ARLWNM:   [4]int{ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0},
+       ppc64.ASLD:     [4]int{ppc64.ASLD, ppc64.ASLDCC, 0, 0},
+       ppc64.ASLW:     [4]int{ppc64.ASLW, ppc64.ASLWCC, 0, 0},
+       ppc64.ASRAD:    [4]int{ppc64.ASRAD, ppc64.ASRADCC, 0, 0},
+       ppc64.ASRAW:    [4]int{ppc64.ASRAW, ppc64.ASRAWCC, 0, 0},
+       ppc64.ASRD:     [4]int{ppc64.ASRD, ppc64.ASRDCC, 0, 0},
+       ppc64.ASRW:     [4]int{ppc64.ASRW, ppc64.ASRWCC, 0, 0},
+       ppc64.ASUB:     [4]int{ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC},
+       ppc64.ASUBC:    [4]int{ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC},
+       ppc64.ASUBE:    [4]int{ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC},
+       ppc64.ASUBME:   [4]int{ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC},
+       ppc64.ASUBZE:   [4]int{ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC},
+       ppc64.AXOR:     [4]int{ppc64.AXOR, ppc64.AXORCC, 0, 0},
+}
+
+var initvariants_initialized int
+
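+// initvariants completes varianttable: each base row is copied into the
+// entries of all of its variant opcodes, so the table can be indexed by
+// any form of an instruction. Opcodes with no row become their own base
+// form. The guard below makes repeated calls no-ops.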
+func initvariants() {
+       if initvariants_initialized != 0 {
+               return
+       }
+       initvariants_initialized = 1
+
+       var j int
+       for i := int(0); i < len(varianttable); i++ {
+               if varianttable[i][0] == 0 {
+                       // Instruction has no variants: record it as its own base form.
+                       varianttable[i][0] = i
+
+                       continue
+               }
+
+               // If this is a base row, copy the full row to each of its
+               // variants so a lookup keyed by any form finds the same set.
+               if varianttable[i][0] == i {
+                       for j = 0; j < len(varianttable[i]); j++ {
+                               varianttable[varianttable[i][j]] = varianttable[i]
+                       }
+               }
+       }
+}
+
+// as2variant returns the variant (V_*) flags of instruction as.
+func as2variant(as int) int {
+       initvariants()
+       for i := int(0); i < len(varianttable[as]); i++ {
+               if varianttable[as][i] == as {
+                       return i
+               }
+       }
+       gc.Fatalf("as2variant: instruction %v is not a variant of itself", obj.Aconv(as))
+       return 0
+}
+
+// variant2as returns the instruction as with the given variant (V_*) flags.
+// If no such variant exists, this returns 0.
+func variant2as(as int, flags int) int {
+       initvariants()
+       return varianttable[as][flags]
+}
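+
+// A sketch of the intended round trip (the V_* variant flags are
+// declared earlier in this file; the row index encodes them, so
+// 0 = base form, 1 = CC-setting, 2 = overflow-setting, 3 = both):
+//
+//     as2variant(ppc64.AADDCC)  // == 1 (the CC-setting variant)
+//     variant2as(ppc64.AADD, 1) // == ppc64.AADDCC
+//
+// In general, variant2as(varianttable[as][0], as2variant(as)) == as for
+// every opcode in the table.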
diff --git a/src/cmd/compile/internal/mips64/reg.go b/src/cmd/compile/internal/mips64/reg.go
new file mode 100644 (file)
index 0000000..fa1cb71
--- /dev/null
@@ -0,0 +1,162 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+//     Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//     Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//     Portions Copyright © 1997-1999 Vita Nuova Limited
+//     Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//     Portions Copyright © 2004,2006 Bruce Ellis
+//     Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//     Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//     Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import "cmd/internal/obj/ppc64"
+import "cmd/compile/internal/gc"
+
+const (
+       NREGVAR = 64 /* 32 general + 32 floating */
+)
+
+var regname = []string{
+       ".R0",
+       ".R1",
+       ".R2",
+       ".R3",
+       ".R4",
+       ".R5",
+       ".R6",
+       ".R7",
+       ".R8",
+       ".R9",
+       ".R10",
+       ".R11",
+       ".R12",
+       ".R13",
+       ".R14",
+       ".R15",
+       ".R16",
+       ".R17",
+       ".R18",
+       ".R19",
+       ".R20",
+       ".R21",
+       ".R22",
+       ".R23",
+       ".R24",
+       ".R25",
+       ".R26",
+       ".R27",
+       ".R28",
+       ".R29",
+       ".R30",
+       ".R31",
+       ".F0",
+       ".F1",
+       ".F2",
+       ".F3",
+       ".F4",
+       ".F5",
+       ".F6",
+       ".F7",
+       ".F8",
+       ".F9",
+       ".F10",
+       ".F11",
+       ".F12",
+       ".F13",
+       ".F14",
+       ".F15",
+       ".F16",
+       ".F17",
+       ".F18",
+       ".F19",
+       ".F20",
+       ".F21",
+       ".F22",
+       ".F23",
+       ".F24",
+       ".F25",
+       ".F26",
+       ".F27",
+       ".F28",
+       ".F29",
+       ".F30",
+       ".F31",
+}
+
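+// regnames returns the names of the tracked registers and sets *n to
+// their count (NREGVAR).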
+func regnames(n *int) []string {
+       *n = NREGVAR
+       return regname
+}
+
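+// excludedregs returns the bit mask of registers that the register
+// allocator must never use.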
+func excludedregs() uint64 {
+       // Exclude registers with fixed functions
+       regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS))
+
+       // Also exclude floating point registers with fixed constants
+       regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
+
+       return regbits
+}
+
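+// doregbits would report any registers implicitly used along with r;
+// there are none on this port, so it always returns 0.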
+func doregbits(r int) uint64 {
+       return 0
+}
+
+/*
+ * track register variables including external registers:
+ *     bit     reg
+ *     0       R0
+ *     1       R1
+ *     ...     ...
+ *     31      R31
+ *     32+0    F0
+ *     32+1    F1
+ *     ...     ...
+ *     32+31   F31
+ */
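+// RtoB returns the bit for register r in the layout above, or 0 if r is
+// not a tracked register.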
+func RtoB(r int) uint64 {
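+       // Note the strict > on REG_R0: in many instruction forms R0 means
+       // the constant zero rather than a register, so it is never
+       // allocated and gets no bit.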
+       if r > ppc64.REG_R0 && r <= ppc64.REG_R31 {
+               return 1 << uint(r-ppc64.REG_R0)
+       }
+       if r >= ppc64.REG_F0 && r <= ppc64.REG_F31 {
+               return 1 << uint(32+r-ppc64.REG_F0)
+       }
+       return 0
+}
+
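+// BtoR decodes the lowest set general-register bit of b back to a
+// register number (REG_R0..REG_R31), or 0 if no such bit is set.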
+func BtoR(b uint64) int {
+       b &= 0xffffffff
+       if b == 0 {
+               return 0
+       }
+       return gc.Bitno(b) + ppc64.REG_R0
+}
+
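+// BtoF decodes the lowest set floating-point-register bit of b back to
+// a register number (REG_F0..REG_F31), or 0 if no such bit is set.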
+func BtoF(b uint64) int {
+       b >>= 32
+       if b == 0 {
+               return 0
+       }
+       return gc.Bitno(b) + ppc64.REG_F0
+}
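+
+// A small worked example of the bit layout (a sketch):
+//
+//     RtoB(ppc64.REG_R3)       // == 1 << 3
+//     BtoR(RtoB(ppc64.REG_R3)) // == ppc64.REG_R3
+//     RtoB(ppc64.REG_F2)       // == 1 << (32 + 2)
+//     BtoF(RtoB(ppc64.REG_F2)) // == ppc64.REG_F2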