Cypherpunks repositories - gostls13.git/commitdiff
cmd/compile: get MIPS64 SSA working
author Cherry Zhang <cherryyz@google.com>
Mon, 22 Aug 2016 16:25:23 +0000 (12:25 -0400)
committer Cherry Zhang <cherryyz@google.com>
Thu, 25 Aug 2016 12:53:36 +0000 (12:53 +0000)
- implement *, /, %, shifts, Zero, Move.
- fix mistakes in comparison.
- fix floating point rounding.
- handle RetJmp in the assembler (previously unhandled; as a consequence,
  Duff's device was disabled in the old backend).

all.bash now passes with SSA on.

Updates #16359.

Change-Id: Ia14eed0ed1176b5d800592080c8f53dded7fe73f
Reviewed-on: https://go-review.googlesource.com/27592
Reviewed-by: David Chase <drchase@google.com>
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>

src/cmd/compile/internal/mips64/ssa.go
src/cmd/compile/internal/ssa/config.go
src/cmd/compile/internal/ssa/gen/MIPS64.rules
src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
src/cmd/compile/internal/ssa/gen/main.go
src/cmd/compile/internal/ssa/opGen.go
src/cmd/compile/internal/ssa/regalloc.go
src/cmd/compile/internal/ssa/rewriteMIPS64.go
src/cmd/compile/internal/ssa/schedule.go
src/cmd/internal/obj/mips/obj0.go

src/cmd/compile/internal/mips64/ssa.go
index 81a63833be3321120a06b06b2de47d43a55153d7..ca859d6c5c08ffda518365c349feb04faa995f8d 100644 (file)
@@ -90,9 +90,19 @@ var ssaRegToReg = []int16{
 // see ../../../../runtime/mheap.go:/minPhysPageSize
 const minZeroPage = 4096
 
+// isFPreg returns whether r is an FP register
+func isFPreg(r int16) bool {
+       return mips.REG_F0 <= r && r <= mips.REG_F31
+}
+
+// isHILO returns whether r is the HI or LO register
+func isHILO(r int16) bool {
+       return r == mips.REG_HI || r == mips.REG_LO
+}
+
 // loadByType returns the load instruction of the given type.
 func loadByType(t ssa.Type, r int16) obj.As {
-       if mips.REG_F0 <= r && r <= mips.REG_F31 {
+       if isFPreg(r) {
                if t.IsFloat() && t.Size() == 4 { // float32
                        return mips.AMOVF
                } else { // float64 or integer in FP register
@@ -127,7 +137,7 @@ func loadByType(t ssa.Type, r int16) obj.As {
 
 // storeByType returns the store instruction of the given type.
 func storeByType(t ssa.Type, r int16) obj.As {
-       if mips.REG_F0 <= r && r <= mips.REG_F31 {
+       if isFPreg(r) {
                if t.IsFloat() && t.Size() == 4 { // float32
                        return mips.AMOVF
                } else { // float64 or integer in FP register
@@ -167,21 +177,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                        return
                }
                as := mips.AMOVV
-               if v.Type.IsFloat() {
-                       switch v.Type.Size() {
-                       case 4:
-                               as = mips.AMOVF
-                       case 8:
-                               as = mips.AMOVD
-                       default:
-                               panic("bad float size")
-                       }
+               if isFPreg(x) && isFPreg(y) {
+                       as = mips.AMOVD
                }
                p := gc.Prog(as)
                p.From.Type = obj.TYPE_REG
                p.From.Reg = x
                p.To.Type = obj.TYPE_REG
                p.To.Reg = y
+               if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
+                       // cannot move between special registers, use TMP as intermediate
+                       p.To.Reg = mips.REGTMP
+                       p = gc.Prog(mips.AMOVV)
+                       p.From.Type = obj.TYPE_REG
+                       p.From.Reg = mips.REGTMP
+                       p.To.Type = obj.TYPE_REG
+                       p.To.Reg = y
+               }
        case ssa.OpMIPS64MOVVnop:
                if gc.SSARegNum(v) != gc.SSARegNum(v.Args[0]) {
                        v.Fatalf("input[0] and output not in same register %s", v.LongString())
@@ -207,6 +219,15 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                }
                p.To.Type = obj.TYPE_REG
                p.To.Reg = r
+               if isHILO(r) {
+                       // cannot directly load, load to TMP and move
+                       p.To.Reg = mips.REGTMP
+                       p = gc.Prog(mips.AMOVV)
+                       p.From.Type = obj.TYPE_REG
+                       p.From.Reg = mips.REGTMP
+                       p.To.Type = obj.TYPE_REG
+                       p.To.Reg = r
+               }
        case ssa.OpPhi:
                gc.CheckLoweredPhi(v)
        case ssa.OpStoreReg:
@@ -215,6 +236,15 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                        return
                }
                r := gc.SSARegNum(v.Args[0])
+               if isHILO(r) {
+                       // cannot directly store, move to TMP and store
+                       p := gc.Prog(mips.AMOVV)
+                       p.From.Type = obj.TYPE_REG
+                       p.From.Reg = r
+                       p.To.Type = obj.TYPE_REG
+                       p.To.Reg = mips.REGTMP
+                       r = mips.REGTMP
+               }
                p := gc.Prog(storeByType(v.Type, r))
                p.From.Type = obj.TYPE_REG
                p.From.Reg = r
@@ -287,11 +317,21 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                p.From.Reg = gc.SSARegNum(v.Args[1])
                p.Reg = gc.SSARegNum(v.Args[0])
        case ssa.OpMIPS64MOVVconst:
+               r := gc.SSARegNum(v)
                p := gc.Prog(v.Op.Asm())
                p.From.Type = obj.TYPE_CONST
                p.From.Offset = v.AuxInt
                p.To.Type = obj.TYPE_REG
-               p.To.Reg = gc.SSARegNum(v)
+               p.To.Reg = r
+               if isFPreg(r) || isHILO(r) {
+                       // cannot move into FP or special registers, use TMP as intermediate
+                       p.To.Reg = mips.REGTMP
+                       p = gc.Prog(mips.AMOVV)
+                       p.From.Type = obj.TYPE_REG
+                       p.From.Reg = mips.REGTMP
+                       p.To.Type = obj.TYPE_REG
+                       p.To.Reg = r
+               }
        case ssa.OpMIPS64MOVFconst,
                ssa.OpMIPS64MOVDconst:
                p := gc.Prog(v.Op.Asm())
@@ -312,9 +352,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
        case ssa.OpMIPS64MOVVaddr:
                p := gc.Prog(mips.AMOVV)
                p.From.Type = obj.TYPE_ADDR
-               p.To.Type = obj.TYPE_REG
-               p.To.Reg = gc.SSARegNum(v)
-
                var wantreg string
                // MOVV $sym+off(base), R
                // the assembler expands it as the following:
@@ -339,6 +376,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg {
                        v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg)
                }
+               p.To.Type = obj.TYPE_REG
+               p.To.Reg = gc.SSARegNum(v)
        case ssa.OpMIPS64MOVBload,
                ssa.OpMIPS64MOVBUload,
                ssa.OpMIPS64MOVHload,
@@ -386,12 +425,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                fallthrough
        case ssa.OpMIPS64MOVWF,
                ssa.OpMIPS64MOVWD,
-               ssa.OpMIPS64MOVFW,
-               ssa.OpMIPS64MOVDW,
+               ssa.OpMIPS64TRUNCFW,
+               ssa.OpMIPS64TRUNCDW,
                ssa.OpMIPS64MOVVF,
                ssa.OpMIPS64MOVVD,
-               ssa.OpMIPS64MOVFV,
-               ssa.OpMIPS64MOVDV,
+               ssa.OpMIPS64TRUNCFV,
+               ssa.OpMIPS64TRUNCDV,
                ssa.OpMIPS64MOVFD,
                ssa.OpMIPS64MOVDF,
                ssa.OpMIPS64NEGF,
@@ -409,6 +448,119 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                p.Reg = mips.REGZERO
                p.To.Type = obj.TYPE_REG
                p.To.Reg = gc.SSARegNum(v)
+       case ssa.OpMIPS64DUFFZERO:
+               // runtime.duffzero expects start address - 8 in R1
+               p := gc.Prog(mips.ASUBVU)
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = 8
+               p.Reg = gc.SSARegNum(v.Args[0])
+               p.To.Type = obj.TYPE_REG
+               p.To.Reg = mips.REG_R1
+               p = gc.Prog(obj.ADUFFZERO)
+               p.To.Type = obj.TYPE_MEM
+               p.To.Name = obj.NAME_EXTERN
+               p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+               p.To.Offset = v.AuxInt
+       case ssa.OpMIPS64LoweredZero:
+               // SUBV $8, R1
+               // MOVV R0, 8(R1)
+               // ADDV $8, R1
+               // BNE  Rarg1, R1, -2(PC)
+               // arg1 is the address of the last element to zero
+               var sz int64
+               var mov obj.As
+               switch {
+               case v.AuxInt%8 == 0:
+                       sz = 8
+                       mov = mips.AMOVV
+               case v.AuxInt%4 == 0:
+                       sz = 4
+                       mov = mips.AMOVW
+               case v.AuxInt%2 == 0:
+                       sz = 2
+                       mov = mips.AMOVH
+               default:
+                       sz = 1
+                       mov = mips.AMOVB
+               }
+               p := gc.Prog(mips.ASUBVU)
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = sz
+               p.To.Type = obj.TYPE_REG
+               p.To.Reg = mips.REG_R1
+               p2 := gc.Prog(mov)
+               p2.From.Type = obj.TYPE_REG
+               p2.From.Reg = mips.REGZERO
+               p2.To.Type = obj.TYPE_MEM
+               p2.To.Reg = mips.REG_R1
+               p2.To.Offset = sz
+               p3 := gc.Prog(mips.AADDVU)
+               p3.From.Type = obj.TYPE_CONST
+               p3.From.Offset = sz
+               p3.To.Type = obj.TYPE_REG
+               p3.To.Reg = mips.REG_R1
+               p4 := gc.Prog(mips.ABNE)
+               p4.From.Type = obj.TYPE_REG
+               p4.From.Reg = gc.SSARegNum(v.Args[1])
+               p4.Reg = mips.REG_R1
+               p4.To.Type = obj.TYPE_BRANCH
+               gc.Patch(p4, p2)
+       case ssa.OpMIPS64LoweredMove:
+               // SUBV $8, R1
+               // MOVV 8(R1), Rtmp
+               // MOVV Rtmp, (R2)
+               // ADDV $8, R1
+               // ADDV $8, R2
+               // BNE  Rarg2, R1, -4(PC)
+               // arg2 is the address of the last element of src
+               var sz int64
+               var mov obj.As
+               switch {
+               case v.AuxInt%8 == 0:
+                       sz = 8
+                       mov = mips.AMOVV
+               case v.AuxInt%4 == 0:
+                       sz = 4
+                       mov = mips.AMOVW
+               case v.AuxInt%2 == 0:
+                       sz = 2
+                       mov = mips.AMOVH
+               default:
+                       sz = 1
+                       mov = mips.AMOVB
+               }
+               p := gc.Prog(mips.ASUBVU)
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = sz
+               p.To.Type = obj.TYPE_REG
+               p.To.Reg = mips.REG_R1
+               p2 := gc.Prog(mov)
+               p2.From.Type = obj.TYPE_MEM
+               p2.From.Reg = mips.REG_R1
+               p2.From.Offset = sz
+               p2.To.Type = obj.TYPE_REG
+               p2.To.Reg = mips.REGTMP
+               p3 := gc.Prog(mov)
+               p3.From.Type = obj.TYPE_REG
+               p3.From.Reg = mips.REGTMP
+               p3.To.Type = obj.TYPE_MEM
+               p3.To.Reg = mips.REG_R2
+               p4 := gc.Prog(mips.AADDVU)
+               p4.From.Type = obj.TYPE_CONST
+               p4.From.Offset = sz
+               p4.To.Type = obj.TYPE_REG
+               p4.To.Reg = mips.REG_R1
+               p5 := gc.Prog(mips.AADDVU)
+               p5.From.Type = obj.TYPE_CONST
+               p5.From.Offset = sz
+               p5.To.Type = obj.TYPE_REG
+               p5.To.Reg = mips.REG_R2
+               p6 := gc.Prog(mips.ABNE)
+               p6.From.Type = obj.TYPE_REG
+               p6.From.Reg = gc.SSARegNum(v.Args[2])
+               p6.Reg = mips.REG_R1
+               p6.To.Type = obj.TYPE_BRANCH
+               gc.Patch(p6, p2)
        case ssa.OpMIPS64CALLstatic:
                if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
                        // Deferred calls will appear to be returning to
@@ -468,7 +620,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                p.From.Reg = gc.SSARegNum(v.Args[0])
                gc.AddAux(&p.From, v)
                p.To.Type = obj.TYPE_REG
-               p.To.Reg = mips.REGZERO
+               p.To.Reg = mips.REGTMP
                if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
                        gc.Warnl(v.Line, "generated nil check")
                }
src/cmd/compile/internal/ssa/config.go
index b2f1fd97357ce75d273cbd93a0699903d6d311c5..6fc30a3847a49da5095928ef3bca69a9f2e67018 100644 (file)
@@ -22,6 +22,7 @@ type Config struct {
        registers       []Register                 // machine registers
        gpRegMask       regMask                    // general purpose integer register mask
        fpRegMask       regMask                    // floating point register mask
+       specialRegMask  regMask                    // special register mask
        FPReg           int8                       // register number of frame pointer, -1 if not used
        hasGReg         bool                       // has hardware g register
        fe              Frontend                   // callbacks into compiler frontend
@@ -202,6 +203,7 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
                c.registers = registersMIPS64[:]
                c.gpRegMask = gpRegMaskMIPS64
                c.fpRegMask = fpRegMaskMIPS64
+               c.specialRegMask = specialRegMaskMIPS64
                c.FPReg = framepointerRegMIPS64
                c.hasGReg = true
        default:
src/cmd/compile/internal/ssa/gen/MIPS64.rules
index 5c5f0ca68b333bd292d813de82e972fe647329af..c342f76a2c616d2fede3f92b62b79019e0a1b2a3 100644 (file)
 (Sub32F x y) -> (SUBF x y)
 (Sub64F x y) -> (SUBD x y)
 
+(Mul64 x y) -> (Select1 (MULVU x y))
+(Mul32 x y) -> (Select1 (MULVU x y))
+(Mul16 x y) -> (Select1 (MULVU x y))
+(Mul8 x y) -> (Select1 (MULVU x y))
+(Mul32F x y) -> (MULF x y)
+(Mul64F x y) -> (MULD x y)
+
+(Hmul64 x y) -> (Select0 (MULV x y))
+(Hmul64u x y) -> (Select0 (MULVU x y))
+(Hmul32 x y) -> (SRAVconst (Select1 <config.fe.TypeInt64()> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+(Hmul32u x y) -> (SRLVconst (Select1 <config.fe.TypeUInt64()> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+(Hmul16 x y) -> (SRAVconst (Select1 <config.fe.TypeInt32()> (MULV (SignExt16to64 x) (SignExt16to64 y))) [16])
+(Hmul16u x y) -> (SRLVconst (Select1 <config.fe.TypeUInt32()> (MULVU (ZeroExt16to64 x) (ZeroExt16to64 y))) [16])
+(Hmul8 x y) -> (SRAVconst (Select1 <config.fe.TypeInt16()> (MULV (SignExt8to64 x) (SignExt8to64 y))) [8])
+(Hmul8u x y) -> (SRLVconst (Select1 <config.fe.TypeUInt16()> (MULVU (ZeroExt8to64 x) (ZeroExt8to64 y))) [8])
+
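
The Hmul rules above exploit that Select1 of MULV/MULVU is the low 64 bits of the product: for operands narrower than 64 bits the full product fits there, so the "high" half is obtained with a plain shift. A minimal Go sketch of the Hmul32 case (illustrative only, not compiler code):

    // hmul32 returns the high 32 bits of the 64-bit product, matching
    // (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32]).
    func hmul32(x, y int32) int32 {
            return int32((int64(x) * int64(y)) >> 32)
    }
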
+(Div64 x y) -> (Select1 (DIVV x y))
+(Div64u x y) -> (Select1 (DIVVU x y))
+(Div32 x y) -> (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+(Div32u x y) -> (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Div16 x y) -> (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+(Div16u x y) -> (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Div8 x y) -> (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+(Div8u x y) -> (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Div32F x y) -> (DIVF x y)
+(Div64F x y) -> (DIVD x y)
+
+(Mod64 x y) -> (Select0 (DIVV x y))
+(Mod64u x y) -> (Select0 (DIVVU x y))
+(Mod32 x y) -> (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+(Mod32u x y) -> (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Mod16 x y) -> (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+(Mod16u x y) -> (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Mod8 x y) -> (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+(Mod8u x y) -> (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+
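
One DIVV computes both results at once: the remainder lands in HI (Select0) and the quotient in LO (Select1), so / and % of the same operands can share a single machine divide. A sketch of the mapping (illustrative only):

    // divmod mirrors the Div64/Mod64 rules above: both results of one DIVV.
    func divmod(x, y int64) (quo, rem int64) {
            quo = x / y // Select1: LO
            rem = x % y // Select0: HI
            return
    }
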
+(Avg64u <t> x y) -> (ADDV (ADDV <t> (SRLVconst <t> x [1]) (SRLVconst <t> y [1])) (AND <t> (AND <t> x y) (MOVVconst [1])))
+
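
The Avg64u rule above uses the carry-free identity (x+y)/2 == (x>>1) + (y>>1) + (x&y&1), which never overflows 64 bits. A sketch of the identity in Go:

    // avg64u computes (x+y)/2 without forming the possibly-overflowing sum,
    // matching the ADDV/SRLVconst/AND expansion above.
    func avg64u(x, y uint64) uint64 {
            return (x >> 1) + (y >> 1) + (x & y & 1)
    }
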
 (And64 x y) -> (AND x y)
 (And32 x y) -> (AND x y)
 (And16 x y) -> (AND x y)
 (Xor16 x y) -> (XOR x y)
 (Xor8 x y) -> (XOR x y)
 
+// shifts
+// hardware instruction uses only the low 6 bits of the shift
+// we compare to 64 to ensure Go semantics for large shifts
+(Lsh64x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+(Lsh64x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh64x8  <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SLLV <t> x (ZeroExt8to64  y)))
+
+(Lsh32x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+(Lsh32x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh32x8  <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SLLV <t> x (ZeroExt8to64  y)))
+
+(Lsh16x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+(Lsh16x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh16x8  <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SLLV <t> x (ZeroExt8to64  y)))
+
+(Lsh8x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+(Lsh8x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh8x8  <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SLLV <t> x (ZeroExt8to64  y)))
+
+(Rsh64Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> x y))
+(Rsh64Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+(Rsh64Ux8  <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SRLV <t> x (ZeroExt8to64  y)))
+
+(Rsh32Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+(Rsh32Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+(Rsh32Ux8  <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64  y)))
+
+(Rsh16Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+(Rsh16Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Rsh16Ux8  <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64  y)))
+
+(Rsh8Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+(Rsh8Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+(Rsh8Ux8  <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64  y)))
+
+(Rsh64x64 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
+(Rsh64x32 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+(Rsh64x16 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+(Rsh64x8  <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
+
+(Rsh32x64 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
+(Rsh32x32 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+(Rsh32x16 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+(Rsh32x8  <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
+
+(Rsh16x64 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
+(Rsh16x32 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+(Rsh16x16 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+(Rsh16x8  <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
+
+(Rsh8x64 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
+(Rsh8x32 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+(Rsh8x16 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+(Rsh8x8  <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
+
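
The guard in each rule above is needed because SLLV/SRLV/SRAV use only the low 6 bits of the count, while Go defines shifts by 64 or more to yield 0 (or the sign bit, for signed right shifts). SGTU(64, y) produces 1 exactly when y < 64, and NEGV turns that into an all-ones mask. A Go sketch of what Lsh64x64 computes (illustrative only):

    // lsh64 forces the result to 0 for counts >= 64, as the
    // AND/NEGV/SGTU wrapper does around the raw SLLV.
    func lsh64(x, s uint64) uint64 {
            var mask uint64
            if 64 > s { // SGTU (Const64 [64]) s
                    mask = ^uint64(0) // NEGV of 1 is all ones
            }
            return mask & (x << (s & 63)) // hardware masks the count to 6 bits
    }
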
 // unary ops
 (Neg64 x) -> (NEGV x)
 (Neg32 x) -> (NEGV x)
 (Cvt32to64F x) -> (MOVWD x)
 (Cvt64to32F x) -> (MOVVF x)
 (Cvt64to64F x) -> (MOVVD x)
-(Cvt32Fto32 x) -> (MOVFW x)
-(Cvt64Fto32 x) -> (MOVDW x)
-(Cvt32Fto64 x) -> (MOVFV x)
-(Cvt64Fto64 x) -> (MOVDV x)
+(Cvt32Fto32 x) -> (TRUNCFW x)
+(Cvt64Fto32 x) -> (TRUNCDW x)
+(Cvt32Fto64 x) -> (TRUNCFV x)
+(Cvt64Fto64 x) -> (TRUNCDV x)
 (Cvt32Fto64F x) -> (MOVFD x)
 (Cvt64Fto32F x) -> (MOVDF x)
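
The TRUNC ops above are the "fix floating point rounding" item from the commit message: Go's float-to-integer conversion truncates toward zero, whereas the MIPS cvt instructions (the old MOVFW/MOVDW/MOVFV/MOVDV forms) round according to the current FCSR rounding mode; the trunc instructions always round toward zero. A sketch of the required semantics:

    // Per the Go spec, float-to-int conversion discards the fraction
    // (rounds toward zero), which is exactly what TRUNCDV provides.
    func cvt64Fto64(x float64) int64 {
            return int64(x) // e.g. int64(-1.9) == -1, not -2
    }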
 
 // comparisons
-(Eq8 x y)  -> (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
-(Eq16 x y) -> (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
-(Eq32 x y) -> (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
-(Eq64 x y) -> (SGTU (XOR x y) (MOVVconst [0]))
-(EqPtr x y) -> (SGTU (XOR x y) (MOVVconst [0]))
+(Eq8 x y)  -> (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Eq16 x y) -> (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Eq32 x y) -> (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Eq64 x y) -> (SGTU (MOVVconst [1]) (XOR x y))
+(EqPtr x y) -> (SGTU (MOVVconst [1]) (XOR x y))
 (Eq32F x y) -> (FPFlagTrue (CMPEQF x y))
 (Eq64F x y) -> (FPFlagTrue (CMPEQD x y))
 
-(Neq8 x y)  -> (SGTU (MOVVconst [0]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
-(Neq16 x y) -> (SGTU (MOVVconst [0]) (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)))
-(Neq32 x y) -> (SGTU (MOVVconst [0]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) )
-(Neq64 x y) -> (SGTU (MOVVconst [0]) (XOR x y))
-(NeqPtr x y) -> (SGTU (MOVVconst [0]) (XOR x y))
+(Neq8 x y)  -> (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
+(Neq16 x y) -> (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
+(Neq32 x y) -> (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
+(Neq64 x y) -> (SGTU (XOR x y) (MOVVconst [0]))
+(NeqPtr x y) -> (SGTU (XOR x y) (MOVVconst [0]))
 (Neq32F x y) -> (FPFlagFalse (CMPEQF x y))
 (Neq64F x y) -> (FPFlagFalse (CMPEQD x y))
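
These swapped operands are the "fix mistakes in comparison" item: SGTU a b yields 1 when a > b as unsigned values, so equality is 1 > (x^y) (true only when x^y is 0) and inequality is (x^y) > 0; the old rules had the two patterns reversed. In plain Go (illustrative only):

    // eq64 and neq64 mirror the corrected SGTU-based lowerings above.
    func eq64(x, y uint64) bool  { return 1 > x^y }  // (SGTU (MOVVconst [1]) (XOR x y))
    func neq64(x, y uint64) bool { return x^y > 0 }  // (SGTU (XOR x y) (MOVVconst [0]))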
 
 (Less32F x y) -> (FPFlagTrue (CMPGTF y x)) // reverse operands to work around NaN
 (Less64F x y) -> (FPFlagTrue (CMPGTD y x)) // reverse operands to work around NaN
 
-(Less8U x y)  -> (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
-(Less16U x y) -> (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
+(Less8U x y)  -> (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
+(Less16U x y) -> (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
 (Less32U x y) -> (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
 (Less64U x y) -> (SGTU y x)
 
 (Store [4] ptr val mem) && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
 (Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
 
+// zeroing
+(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore ptr (MOVVconst [0]) mem)
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
+       (MOVHstore ptr (MOVVconst [0]) mem)
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 2 ->
+       (MOVBstore [1] ptr (MOVVconst [0])
+               (MOVBstore [0] ptr (MOVVconst [0]) mem))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
+       (MOVWstore ptr (MOVVconst [0]) mem)
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
+       (MOVHstore [2] ptr (MOVVconst [0])
+               (MOVHstore [0] ptr (MOVVconst [0]) mem))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 ->
+       (MOVBstore [3] ptr (MOVVconst [0])
+               (MOVBstore [2] ptr (MOVVconst [0])
+                       (MOVBstore [1] ptr (MOVVconst [0])
+                               (MOVBstore [0] ptr (MOVVconst [0]) mem))))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0 ->
+       (MOVVstore ptr (MOVVconst [0]) mem)
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0 ->
+       (MOVWstore [4] ptr (MOVVconst [0])
+               (MOVWstore [0] ptr (MOVVconst [0]) mem))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0 ->
+       (MOVHstore [6] ptr (MOVVconst [0])
+               (MOVHstore [4] ptr (MOVVconst [0])
+                       (MOVHstore [2] ptr (MOVVconst [0])
+                               (MOVHstore [0] ptr (MOVVconst [0]) mem))))
+
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 3 ->
+       (MOVBstore [2] ptr (MOVVconst [0])
+               (MOVBstore [1] ptr (MOVVconst [0])
+                       (MOVBstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0 ->
+       (MOVHstore [4] ptr (MOVVconst [0])
+               (MOVHstore [2] ptr (MOVVconst [0])
+                       (MOVHstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0 ->
+       (MOVWstore [8] ptr (MOVVconst [0])
+               (MOVWstore [4] ptr (MOVVconst [0])
+                       (MOVWstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0 ->
+       (MOVVstore [8] ptr (MOVVconst [0])
+               (MOVVstore [0] ptr (MOVVconst [0]) mem))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0 ->
+       (MOVVstore [16] ptr (MOVVconst [0])
+               (MOVVstore [8] ptr (MOVVconst [0])
+                       (MOVVstore [0] ptr (MOVVconst [0]) mem)))
+
+// medium zeroing uses a Duff's device
+// 8 and 128 are magic constants; see runtime/mkduff.go
+(Zero [s] ptr mem)
+       && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128
+       && SizeAndAlign(s).Align()%8 == 0 && !config.noDuffDevice ->
+       (DUFFZERO [8 * (128 - int64(SizeAndAlign(s).Size()/8))] ptr mem)
+
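
The auxint expression above picks the entry point into runtime.duffzero. Assuming the mips64 duffzero body generated by runtime/mkduff.go has 128 elements of 8 bytes of code each, one per 8-byte word zeroed, entering 8*(128 - size/8) bytes in executes exactly the elements needed. A sketch of the computation (hypothetical helper, for illustration):

    // duffZeroOffset returns the code offset for zeroing size bytes;
    // the rule requires size%8 == 0 and 24 < size <= 8*128.
    func duffZeroOffset(size int64) int64 {
            return 8 * (128 - size/8)
    }
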
+// large or unaligned zeroing uses a loop
+(Zero [s] ptr mem)
+       && (SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0 ->
+       (LoweredZero [SizeAndAlign(s).Align()]
+               ptr
+               (ADDVconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])
+               mem)
+
+// moves
+(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
+       (MOVHstore dst (MOVHload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 ->
+       (MOVBstore [1] dst (MOVBload [1] src mem)
+               (MOVBstore dst (MOVBload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
+       (MOVWstore dst (MOVWload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
+       (MOVHstore [2] dst (MOVHload [2] src mem)
+               (MOVHstore dst (MOVHload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 ->
+       (MOVBstore [3] dst (MOVBload [3] src mem)
+               (MOVBstore [2] dst (MOVBload [2] src mem)
+                       (MOVBstore [1] dst (MOVBload [1] src mem)
+                               (MOVBstore dst (MOVBload src mem) mem))))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0 ->
+       (MOVVstore dst (MOVVload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0 ->
+       (MOVWstore [4] dst (MOVWload [4] src mem)
+               (MOVWstore dst (MOVWload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0 ->
+       (MOVHstore [6] dst (MOVHload [6] src mem)
+               (MOVHstore [4] dst (MOVHload [4] src mem)
+                       (MOVHstore [2] dst (MOVHload [2] src mem)
+                               (MOVHstore dst (MOVHload src mem) mem))))
+
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
+       (MOVBstore [2] dst (MOVBload [2] src mem)
+               (MOVBstore [1] dst (MOVBload [1] src mem)
+                       (MOVBstore dst (MOVBload src mem) mem)))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0 ->
+       (MOVHstore [4] dst (MOVHload [4] src mem)
+               (MOVHstore [2] dst (MOVHload [2] src mem)
+                       (MOVHstore dst (MOVHload src mem) mem)))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0 ->
+       (MOVWstore [8] dst (MOVWload [8] src mem)
+               (MOVWstore [4] dst (MOVWload [4] src mem)
+                       (MOVWstore dst (MOVWload src mem) mem)))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0 ->
+       (MOVVstore [8] dst (MOVVload [8] src mem)
+               (MOVVstore dst (MOVVload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0 ->
+       (MOVVstore [16] dst (MOVVload [16] src mem)
+               (MOVVstore [8] dst (MOVVload [8] src mem)
+                       (MOVVstore dst (MOVVload src mem) mem)))
+
+// large or unaligned move uses a loop
+(Move [s] dst src mem)
+       && SizeAndAlign(s).Size() > 24 || SizeAndAlign(s).Align()%8 != 0 ->
+       (LoweredMove [SizeAndAlign(s).Align()]
+               dst
+               src
+               (ADDVconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])
+               mem)
+
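
For both LoweredZero and LoweredMove, auxint carries the alignment, and the code generator (the sz/mov switch in mips64/ssa.go above) walks memory in the widest unit that alignment guarantees is safe. A sketch of that width selection:

    // moveUnit mirrors the switch in ssaGenValue: the zero/copy loop
    // operates in the widest unit dividing the alignment.
    func moveUnit(align int64) int64 {
            switch {
            case align%8 == 0:
                    return 8 // MOVV
            case align%4 == 0:
                    return 4 // MOVW
            case align%2 == 0:
                    return 2 // MOVH
            default:
                    return 1 // MOVB
            }
    }
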
 // calls
 (StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
 (ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
index f0b61138ca00715f381cc2cce9b1b613a5188bcb..1d52e394f4108da8ad4dbb1b1e8435b35879631f 100644 (file)
@@ -160,14 +160,14 @@ func init() {
        )
        ops := []opData{
                // binary ops
-               {name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true},     // arg0 + arg1
-               {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"},   // arg0 + auxInt
-               {name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"},                        // arg0 - arg1
-               {name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"},     // arg0 - auxInt
-               {name: "MULV", argLength: 2, reg: gp2hilo, asm: "MULV", commutative: true},   // arg0 * arg1, signed, results hi,lo
-               {name: "MULVU", argLength: 2, reg: gp2hilo, asm: "MULVU", commutative: true}, // arg0 * arg1, unsigned, results hi,lo
-               {name: "DIVV", argLength: 2, reg: gp2hilo, asm: "DIVV"},                      // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
-               {name: "DIVVU", argLength: 2, reg: gp2hilo, asm: "DIVVU"},                    // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+               {name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true},                             // arg0 + arg1
+               {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"},                           // arg0 + auxInt
+               {name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"},                                                // arg0 - arg1
+               {name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"},                             // arg0 - auxInt
+               {name: "MULV", argLength: 2, reg: gp2hilo, asm: "MULV", commutative: true, typ: "(Int64,Int64)"},     // arg0 * arg1, signed, results hi,lo
+               {name: "MULVU", argLength: 2, reg: gp2hilo, asm: "MULVU", commutative: true, typ: "(UInt64,UInt64)"}, // arg0 * arg1, unsigned, results hi,lo
+               {name: "DIVV", argLength: 2, reg: gp2hilo, asm: "DIVV", typ: "(Int64,Int64)"},                        // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+               {name: "DIVVU", argLength: 2, reg: gp2hilo, asm: "DIVVU", typ: "(UInt64,UInt64)"},                  // arg0 / arg1, unsigned, results hi=arg0%arg1,lo=arg0/arg1
 
                {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
                {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
@@ -252,16 +252,16 @@ func init() {
 
                {name: "MOVVnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
 
-               {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32
-               {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64
-               {name: "MOVVF", argLength: 1, reg: fp11, asm: "MOVVF"}, // int64 -> float32
-               {name: "MOVVD", argLength: 1, reg: fp11, asm: "MOVVD"}, // int64 -> float64
-               {name: "MOVFW", argLength: 1, reg: fp11, asm: "MOVFW"}, // float32 -> int32
-               {name: "MOVDW", argLength: 1, reg: fp11, asm: "MOVDW"}, // float64 -> int32
-               {name: "MOVFV", argLength: 1, reg: fp11, asm: "MOVFV"}, // float32 -> int64
-               {name: "MOVDV", argLength: 1, reg: fp11, asm: "MOVDV"}, // float64 -> int64
-               {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
-               {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+               {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"},     // int32 -> float32
+               {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"},     // int32 -> float64
+               {name: "MOVVF", argLength: 1, reg: fp11, asm: "MOVVF"},     // int64 -> float32
+               {name: "MOVVD", argLength: 1, reg: fp11, asm: "MOVVD"},     // int64 -> float64
+               {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32
+               {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32
+               {name: "TRUNCFV", argLength: 1, reg: fp11, asm: "TRUNCFV"}, // float32 -> int64
+               {name: "TRUNCDV", argLength: 1, reg: fp11, asm: "TRUNCDV"}, // float64 -> int64
+               {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"},     // float32 -> float64
+               {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"},     // float64 -> float32
 
                // function calls
                {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true},                                              // call static function aux.(*gc.Sym).  arg0=mem, auxint=argsize, returns mem
@@ -270,6 +270,67 @@ func init() {
                {name: "CALLgo", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true},                                                   // call newproc.  arg0=mem, auxint=argsize, returns mem
                {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true},                         // call fn by pointer.  arg0=codeptr, arg1=mem, auxint=argsize, returns mem
 
+               // duffzero
+               // arg0 = address of memory to zero
+               // arg1 = mem
+               // auxint = offset into duffzero code to start executing
+               // returns mem
+               // R1 aka mips.REGRT1 changed as side effect
+               {
+                       name:      "DUFFZERO",
+                       aux:       "Int64",
+                       argLength: 2,
+                       reg: regInfo{
+                               inputs:   []regMask{gp},
+                               clobbers: buildReg("R1"),
+                       },
+               },
+
+               // large or unaligned zeroing
+               // arg0 = address of memory to zero (in R1, changed as side effect)
+               // arg1 = address of the last element to zero
+               // arg2 = mem
+               // auxint = alignment
+               // returns mem
+               //      SUBV    $8, R1
+               //      MOVV    R0, 8(R1)
+               //      ADDV    $8, R1
+               //      BNE     Rarg1, R1, -2(PC)
+               {
+                       name:      "LoweredZero",
+                       aux:       "Int64",
+                       argLength: 3,
+                       reg: regInfo{
+                               inputs:   []regMask{buildReg("R1"), gp},
+                               clobbers: buildReg("R1"),
+                       },
+                       clobberFlags: true,
+               },
+
+               // large or unaligned move
+               // arg0 = address of dst memory (in R2, changed as side effect)
+               // arg1 = address of src memory (in R1, changed as side effect)
+               // arg2 = address of the last element of src
+               // arg3 = mem
+               // auxint = alignment
+               // returns mem
+               //      SUBV    $8, R1
+               //      MOVV    8(R1), Rtmp
+               //      MOVV    Rtmp, (R2)
+               //      ADDV    $8, R1
+               //      ADDV    $8, R2
+               //      BNE     Rarg2, R1, -4(PC)
+               {
+                       name:      "LoweredMove",
+                       aux:       "Int64",
+                       argLength: 4,
+                       reg: regInfo{
+                               inputs:   []regMask{buildReg("R2"), buildReg("R1"), gp},
+                               clobbers: buildReg("R1 R2"),
+                       },
+                       clobberFlags: true,
+               },
+
                // pseudo-ops
                {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}}, // panic if arg0 is nil.  arg1=mem.
 
@@ -309,6 +370,7 @@ func init() {
                regnames:        regNamesMIPS64,
                gpregmask:       gp,
                fpregmask:       fp,
+               specialregmask:  hi | lo,
                framepointerreg: -1, // not used
        })
 }
src/cmd/compile/internal/ssa/gen/main.go
index 19c1bc716327b8a33689e1ec48e6a3567998a2c4..059315542eaa0d79bde38d220f76ad65a5e29157 100644 (file)
@@ -29,6 +29,7 @@ type arch struct {
        regnames        []string
        gpregmask       regMask
        fpregmask       regMask
+       specialregmask  regMask
        framepointerreg int8
        generic         bool
 }
@@ -241,6 +242,7 @@ func genOp() {
                fmt.Fprintln(w, "}")
                fmt.Fprintf(w, "var gpRegMask%s = regMask(%d)\n", a.name, a.gpregmask)
                fmt.Fprintf(w, "var fpRegMask%s = regMask(%d)\n", a.name, a.fpregmask)
+               fmt.Fprintf(w, "var specialRegMask%s = regMask(%d)\n", a.name, a.specialregmask)
                fmt.Fprintf(w, "var framepointerReg%s = int8(%d)\n", a.name, a.framepointerreg)
        }
 
src/cmd/compile/internal/ssa/opGen.go
index 80b5d72dce56cede9c7339727510a9df89bb70aa..61386126772f97af0b9ab5659ac087cefff97ae1 100644 (file)
@@ -1051,10 +1051,10 @@ const (
        OpMIPS64MOVWD
        OpMIPS64MOVVF
        OpMIPS64MOVVD
-       OpMIPS64MOVFW
-       OpMIPS64MOVDW
-       OpMIPS64MOVFV
-       OpMIPS64MOVDV
+       OpMIPS64TRUNCFW
+       OpMIPS64TRUNCDW
+       OpMIPS64TRUNCFV
+       OpMIPS64TRUNCDV
        OpMIPS64MOVFD
        OpMIPS64MOVDF
        OpMIPS64CALLstatic
@@ -1062,6 +1062,9 @@ const (
        OpMIPS64CALLdefer
        OpMIPS64CALLgo
        OpMIPS64CALLinter
+       OpMIPS64DUFFZERO
+       OpMIPS64LoweredZero
+       OpMIPS64LoweredMove
        OpMIPS64LoweredNilCheck
        OpMIPS64FPFlagTrue
        OpMIPS64FPFlagFalse
@@ -12905,9 +12908,9 @@ var opcodeTable = [...]opInfo{
                },
        },
        {
-               name:   "MOVFW",
+               name:   "TRUNCFW",
                argLen: 1,
-               asm:    mips.AMOVFW,
+               asm:    mips.ATRUNCFW,
                reg: regInfo{
                        inputs: []inputInfo{
                                {0, 385057768005959680}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F25 F27 F29 F31
@@ -12918,9 +12921,9 @@ var opcodeTable = [...]opInfo{
                },
        },
        {
-               name:   "MOVDW",
+               name:   "TRUNCDW",
                argLen: 1,
-               asm:    mips.AMOVDW,
+               asm:    mips.ATRUNCDW,
                reg: regInfo{
                        inputs: []inputInfo{
                                {0, 385057768005959680}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F25 F27 F29 F31
@@ -12931,9 +12934,9 @@ var opcodeTable = [...]opInfo{
                },
        },
        {
-               name:   "MOVFV",
+               name:   "TRUNCFV",
                argLen: 1,
-               asm:    mips.AMOVFV,
+               asm:    mips.ATRUNCFV,
                reg: regInfo{
                        inputs: []inputInfo{
                                {0, 385057768005959680}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F25 F27 F29 F31
@@ -12944,9 +12947,9 @@ var opcodeTable = [...]opInfo{
                },
        },
        {
-               name:   "MOVDV",
+               name:   "TRUNCDV",
                argLen: 1,
-               asm:    mips.AMOVDV,
+               asm:    mips.ATRUNCDV,
                reg: regInfo{
                        inputs: []inputInfo{
                                {0, 385057768005959680}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F25 F27 F29 F31
@@ -13034,6 +13037,44 @@ var opcodeTable = [...]opInfo{
                        clobbers: 2114440025016893438, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F25 F27 F29 F31 HI LO
                },
        },
+       {
+               name:    "DUFFZERO",
+               auxType: auxInt64,
+               argLen:  2,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 33554430}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25
+                       },
+                       clobbers: 2, // R1
+               },
+       },
+       {
+               name:         "LoweredZero",
+               auxType:      auxInt64,
+               argLen:       3,
+               clobberFlags: true,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 2},        // R1
+                               {1, 33554430}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25
+                       },
+                       clobbers: 2, // R1
+               },
+       },
+       {
+               name:         "LoweredMove",
+               auxType:      auxInt64,
+               argLen:       4,
+               clobberFlags: true,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 4},        // R2
+                               {1, 2},        // R1
+                               {2, 33554430}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25
+                       },
+                       clobbers: 6, // R1 R2
+               },
+       },
        {
                name:   "LoweredNilCheck",
                argLen: 2,
@@ -16141,6 +16182,7 @@ var registers386 = [...]Register{
 }
 var gpRegMask386 = regMask(239)
 var fpRegMask386 = regMask(65280)
+var specialRegMask386 = regMask(0)
 var framepointerReg386 = int8(5)
 var registersAMD64 = [...]Register{
        {0, "AX"},
@@ -16179,6 +16221,7 @@ var registersAMD64 = [...]Register{
 }
 var gpRegMaskAMD64 = regMask(65519)
 var fpRegMaskAMD64 = regMask(4294901760)
+var specialRegMaskAMD64 = regMask(0)
 var framepointerRegAMD64 = int8(5)
 var registersARM = [...]Register{
        {0, "R0"},
@@ -16217,6 +16260,7 @@ var registersARM = [...]Register{
 }
 var gpRegMaskARM = regMask(5119)
 var fpRegMaskARM = regMask(4294901760)
+var specialRegMaskARM = regMask(0)
 var framepointerRegARM = int8(-1)
 var registersARM64 = [...]Register{
        {0, "R0"},
@@ -16285,6 +16329,7 @@ var registersARM64 = [...]Register{
 }
 var gpRegMaskARM64 = regMask(133955583)
 var fpRegMaskARM64 = regMask(288230375077969920)
+var specialRegMaskARM64 = regMask(0)
 var framepointerRegARM64 = int8(-1)
 var registersMIPS64 = [...]Register{
        {0, "R0"},
@@ -16352,6 +16397,7 @@ var registersMIPS64 = [...]Register{
 }
 var gpRegMaskMIPS64 = regMask(33554430)
 var fpRegMaskMIPS64 = regMask(385057768005959680)
+var specialRegMaskMIPS64 = regMask(1729382256910270464)
 var framepointerRegMIPS64 = int8(-1)
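
The generated constant above encodes exactly the HI and LO bits. Assuming mask bit positions follow the order of registersMIPS64, with HI and LO at indices 59 and 60, the value checks out:

    // 1<<59 | 1<<60 == 1729382256910270464: the HI and LO register bits.
    const specialRegMaskMIPS64Check = 1<<59 | 1<<60
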
 var registersPPC64 = [...]Register{
        {0, "SP"},
@@ -16415,4 +16461,5 @@ var registersPPC64 = [...]Register{
 }
 var gpRegMaskPPC64 = regMask(536866812)
 var fpRegMaskPPC64 = regMask(288230371856744448)
+var specialRegMaskPPC64 = regMask(0)
 var framepointerRegPPC64 = int8(0)
src/cmd/compile/internal/ssa/regalloc.go
index 5aba193aded006953b84e7b01da2f7860c0566dc..3dc9fad28a9f95fcb1aee3488734fbb5e61a46de 100644 (file)
@@ -471,7 +471,7 @@ func (s *regAllocState) init(f *Func) {
        }
 
        // Figure out which registers we're allowed to use.
-       s.allocatable = s.f.Config.gpRegMask | s.f.Config.fpRegMask
+       s.allocatable = s.f.Config.gpRegMask | s.f.Config.fpRegMask | s.f.Config.specialRegMask
        s.allocatable &^= 1 << s.SPReg
        s.allocatable &^= 1 << s.SBReg
        if s.f.Config.hasGReg {
@@ -1302,7 +1302,7 @@ func (s *regAllocState) regalloc(f *Func) {
                        // We assume that a control input can be passed in any
                        // type-compatible register. If this turns out not to be true,
                        // we'll need to introduce a regspec for a block's control value.
-                       s.allocValToReg(v, s.compatRegs(v.Type), false, b.Line)
+                       b.Control = s.allocValToReg(v, s.compatRegs(v.Type), false, b.Line)
                        // Remove this use from the uses list.
                        vi := &s.values[v.ID]
                        u := vi.uses
src/cmd/compile/internal/ssa/rewriteMIPS64.go
index 8ab23eb2df2405084533ecb18a85e896848465e3..1c82c0c94940f87cf994274ca65c7c5636dddef1 100644 (file)
@@ -34,6 +34,8 @@ func rewriteValueMIPS64(v *Value, config *Config) bool {
                return rewriteValueMIPS64_OpAnd8(v, config)
        case OpAndB:
                return rewriteValueMIPS64_OpAndB(v, config)
+       case OpAvg64u:
+               return rewriteValueMIPS64_OpAvg64u(v, config)
        case OpClosureCall:
                return rewriteValueMIPS64_OpClosureCall(v, config)
        case OpCom16:
@@ -84,6 +86,26 @@ func rewriteValueMIPS64(v *Value, config *Config) bool {
                return rewriteValueMIPS64_OpCvt64to64F(v, config)
        case OpDeferCall:
                return rewriteValueMIPS64_OpDeferCall(v, config)
+       case OpDiv16:
+               return rewriteValueMIPS64_OpDiv16(v, config)
+       case OpDiv16u:
+               return rewriteValueMIPS64_OpDiv16u(v, config)
+       case OpDiv32:
+               return rewriteValueMIPS64_OpDiv32(v, config)
+       case OpDiv32F:
+               return rewriteValueMIPS64_OpDiv32F(v, config)
+       case OpDiv32u:
+               return rewriteValueMIPS64_OpDiv32u(v, config)
+       case OpDiv64:
+               return rewriteValueMIPS64_OpDiv64(v, config)
+       case OpDiv64F:
+               return rewriteValueMIPS64_OpDiv64F(v, config)
+       case OpDiv64u:
+               return rewriteValueMIPS64_OpDiv64u(v, config)
+       case OpDiv8:
+               return rewriteValueMIPS64_OpDiv8(v, config)
+       case OpDiv8u:
+               return rewriteValueMIPS64_OpDiv8u(v, config)
        case OpEq16:
                return rewriteValueMIPS64_OpEq16(v, config)
        case OpEq32:
@@ -144,6 +166,22 @@ func rewriteValueMIPS64(v *Value, config *Config) bool {
                return rewriteValueMIPS64_OpGreater8(v, config)
        case OpGreater8U:
                return rewriteValueMIPS64_OpGreater8U(v, config)
+       case OpHmul16:
+               return rewriteValueMIPS64_OpHmul16(v, config)
+       case OpHmul16u:
+               return rewriteValueMIPS64_OpHmul16u(v, config)
+       case OpHmul32:
+               return rewriteValueMIPS64_OpHmul32(v, config)
+       case OpHmul32u:
+               return rewriteValueMIPS64_OpHmul32u(v, config)
+       case OpHmul64:
+               return rewriteValueMIPS64_OpHmul64(v, config)
+       case OpHmul64u:
+               return rewriteValueMIPS64_OpHmul64u(v, config)
+       case OpHmul8:
+               return rewriteValueMIPS64_OpHmul8(v, config)
+       case OpHmul8u:
+               return rewriteValueMIPS64_OpHmul8u(v, config)
        case OpInterCall:
                return rewriteValueMIPS64_OpInterCall(v, config)
        case OpIsInBounds:
@@ -194,6 +232,68 @@ func rewriteValueMIPS64(v *Value, config *Config) bool {
                return rewriteValueMIPS64_OpLess8U(v, config)
        case OpLoad:
                return rewriteValueMIPS64_OpLoad(v, config)
+       case OpLsh16x16:
+               return rewriteValueMIPS64_OpLsh16x16(v, config)
+       case OpLsh16x32:
+               return rewriteValueMIPS64_OpLsh16x32(v, config)
+       case OpLsh16x64:
+               return rewriteValueMIPS64_OpLsh16x64(v, config)
+       case OpLsh16x8:
+               return rewriteValueMIPS64_OpLsh16x8(v, config)
+       case OpLsh32x16:
+               return rewriteValueMIPS64_OpLsh32x16(v, config)
+       case OpLsh32x32:
+               return rewriteValueMIPS64_OpLsh32x32(v, config)
+       case OpLsh32x64:
+               return rewriteValueMIPS64_OpLsh32x64(v, config)
+       case OpLsh32x8:
+               return rewriteValueMIPS64_OpLsh32x8(v, config)
+       case OpLsh64x16:
+               return rewriteValueMIPS64_OpLsh64x16(v, config)
+       case OpLsh64x32:
+               return rewriteValueMIPS64_OpLsh64x32(v, config)
+       case OpLsh64x64:
+               return rewriteValueMIPS64_OpLsh64x64(v, config)
+       case OpLsh64x8:
+               return rewriteValueMIPS64_OpLsh64x8(v, config)
+       case OpLsh8x16:
+               return rewriteValueMIPS64_OpLsh8x16(v, config)
+       case OpLsh8x32:
+               return rewriteValueMIPS64_OpLsh8x32(v, config)
+       case OpLsh8x64:
+               return rewriteValueMIPS64_OpLsh8x64(v, config)
+       case OpLsh8x8:
+               return rewriteValueMIPS64_OpLsh8x8(v, config)
+       case OpMod16:
+               return rewriteValueMIPS64_OpMod16(v, config)
+       case OpMod16u:
+               return rewriteValueMIPS64_OpMod16u(v, config)
+       case OpMod32:
+               return rewriteValueMIPS64_OpMod32(v, config)
+       case OpMod32u:
+               return rewriteValueMIPS64_OpMod32u(v, config)
+       case OpMod64:
+               return rewriteValueMIPS64_OpMod64(v, config)
+       case OpMod64u:
+               return rewriteValueMIPS64_OpMod64u(v, config)
+       case OpMod8:
+               return rewriteValueMIPS64_OpMod8(v, config)
+       case OpMod8u:
+               return rewriteValueMIPS64_OpMod8u(v, config)
+       case OpMove:
+               return rewriteValueMIPS64_OpMove(v, config)
+       case OpMul16:
+               return rewriteValueMIPS64_OpMul16(v, config)
+       case OpMul32:
+               return rewriteValueMIPS64_OpMul32(v, config)
+       case OpMul32F:
+               return rewriteValueMIPS64_OpMul32F(v, config)
+       case OpMul64:
+               return rewriteValueMIPS64_OpMul64(v, config)
+       case OpMul64F:
+               return rewriteValueMIPS64_OpMul64F(v, config)
+       case OpMul8:
+               return rewriteValueMIPS64_OpMul8(v, config)
        case OpNeg16:
                return rewriteValueMIPS64_OpNeg16(v, config)
        case OpNeg32:
@@ -238,6 +338,70 @@ func rewriteValueMIPS64(v *Value, config *Config) bool {
                return rewriteValueMIPS64_OpOr8(v, config)
        case OpOrB:
                return rewriteValueMIPS64_OpOrB(v, config)
+       case OpRsh16Ux16:
+               return rewriteValueMIPS64_OpRsh16Ux16(v, config)
+       case OpRsh16Ux32:
+               return rewriteValueMIPS64_OpRsh16Ux32(v, config)
+       case OpRsh16Ux64:
+               return rewriteValueMIPS64_OpRsh16Ux64(v, config)
+       case OpRsh16Ux8:
+               return rewriteValueMIPS64_OpRsh16Ux8(v, config)
+       case OpRsh16x16:
+               return rewriteValueMIPS64_OpRsh16x16(v, config)
+       case OpRsh16x32:
+               return rewriteValueMIPS64_OpRsh16x32(v, config)
+       case OpRsh16x64:
+               return rewriteValueMIPS64_OpRsh16x64(v, config)
+       case OpRsh16x8:
+               return rewriteValueMIPS64_OpRsh16x8(v, config)
+       case OpRsh32Ux16:
+               return rewriteValueMIPS64_OpRsh32Ux16(v, config)
+       case OpRsh32Ux32:
+               return rewriteValueMIPS64_OpRsh32Ux32(v, config)
+       case OpRsh32Ux64:
+               return rewriteValueMIPS64_OpRsh32Ux64(v, config)
+       case OpRsh32Ux8:
+               return rewriteValueMIPS64_OpRsh32Ux8(v, config)
+       case OpRsh32x16:
+               return rewriteValueMIPS64_OpRsh32x16(v, config)
+       case OpRsh32x32:
+               return rewriteValueMIPS64_OpRsh32x32(v, config)
+       case OpRsh32x64:
+               return rewriteValueMIPS64_OpRsh32x64(v, config)
+       case OpRsh32x8:
+               return rewriteValueMIPS64_OpRsh32x8(v, config)
+       case OpRsh64Ux16:
+               return rewriteValueMIPS64_OpRsh64Ux16(v, config)
+       case OpRsh64Ux32:
+               return rewriteValueMIPS64_OpRsh64Ux32(v, config)
+       case OpRsh64Ux64:
+               return rewriteValueMIPS64_OpRsh64Ux64(v, config)
+       case OpRsh64Ux8:
+               return rewriteValueMIPS64_OpRsh64Ux8(v, config)
+       case OpRsh64x16:
+               return rewriteValueMIPS64_OpRsh64x16(v, config)
+       case OpRsh64x32:
+               return rewriteValueMIPS64_OpRsh64x32(v, config)
+       case OpRsh64x64:
+               return rewriteValueMIPS64_OpRsh64x64(v, config)
+       case OpRsh64x8:
+               return rewriteValueMIPS64_OpRsh64x8(v, config)
+       case OpRsh8Ux16:
+               return rewriteValueMIPS64_OpRsh8Ux16(v, config)
+       case OpRsh8Ux32:
+               return rewriteValueMIPS64_OpRsh8Ux32(v, config)
+       case OpRsh8Ux64:
+               return rewriteValueMIPS64_OpRsh8Ux64(v, config)
+       case OpRsh8Ux8:
+               return rewriteValueMIPS64_OpRsh8Ux8(v, config)
+       case OpRsh8x16:
+               return rewriteValueMIPS64_OpRsh8x16(v, config)
+       case OpRsh8x32:
+               return rewriteValueMIPS64_OpRsh8x32(v, config)
+       case OpRsh8x64:
+               return rewriteValueMIPS64_OpRsh8x64(v, config)
+       case OpRsh8x8:
+               return rewriteValueMIPS64_OpRsh8x8(v, config)
        case OpSignExt16to32:
                return rewriteValueMIPS64_OpSignExt16to32(v, config)
        case OpSignExt16to64:
@@ -288,6 +452,8 @@ func rewriteValueMIPS64(v *Value, config *Config) bool {
                return rewriteValueMIPS64_OpXor64(v, config)
        case OpXor8:
                return rewriteValueMIPS64_OpXor8(v, config)
+       case OpZero:
+               return rewriteValueMIPS64_OpZero(v, config)
        case OpZeroExt16to32:
                return rewriteValueMIPS64_OpZeroExt16to32(v, config)
        case OpZeroExt16to64:
@@ -498,6 +664,39 @@ func rewriteValueMIPS64_OpAndB(v *Value, config *Config) bool {
                return true
        }
 }
+func rewriteValueMIPS64_OpAvg64u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Avg64u <t> x y)
+       // cond:
+       // result: (ADDV (ADDV <t> (SRLVconst <t> x [1]) (SRLVconst <t> y [1])) (AND <t> (AND <t> x y) (MOVVconst [1])))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64ADDV)
+               v0 := b.NewValue0(v.Line, OpMIPS64ADDV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SRLVconst, t)
+               v1.AuxInt = 1
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpMIPS64SRLVconst, t)
+               v2.AuxInt = 1
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpMIPS64AND, t)
+               v4 := b.NewValue0(v.Line, OpMIPS64AND, t)
+               v4.AddArg(x)
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v5.AuxInt = 1
+               v3.AddArg(v5)
+               v.AddArg(v3)
+               return true
+       }
+}
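
The Avg64u rule above encodes the standard overflow-free floor average: shift each operand right once, add, then put back the carry bit that both low bits contribute. A minimal Go sketch of the same identity (illustrative only, not part of the generated file):

    // floor((x+y)/2) without computing x+y, which could overflow 64 bits.
    func avg64u(x, y uint64) uint64 {
            return (x>>1 + y>>1) + (x & y & 1)
    }

For example, avg64u(^uint64(0), ^uint64(0)-2) yields 18446744073709551614, the exact floor average, even though x+y would wrap.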
 func rewriteValueMIPS64_OpClosureCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
@@ -704,10 +903,10 @@ func rewriteValueMIPS64_OpCvt32Fto32(v *Value, config *Config) bool {
        _ = b
        // match: (Cvt32Fto32 x)
        // cond:
-       // result: (MOVFW x)
+       // result: (TRUNCFW x)
        for {
                x := v.Args[0]
-               v.reset(OpMIPS64MOVFW)
+               v.reset(OpMIPS64TRUNCFW)
                v.AddArg(x)
                return true
        }
@@ -717,10 +916,10 @@ func rewriteValueMIPS64_OpCvt32Fto64(v *Value, config *Config) bool {
        _ = b
        // match: (Cvt32Fto64 x)
        // cond:
-       // result: (MOVFV x)
+       // result: (TRUNCFV x)
        for {
                x := v.Args[0]
-               v.reset(OpMIPS64MOVFV)
+               v.reset(OpMIPS64TRUNCFV)
                v.AddArg(x)
                return true
        }
@@ -769,10 +968,10 @@ func rewriteValueMIPS64_OpCvt64Fto32(v *Value, config *Config) bool {
        _ = b
        // match: (Cvt64Fto32 x)
        // cond:
-       // result: (MOVDW x)
+       // result: (TRUNCDW x)
        for {
                x := v.Args[0]
-               v.reset(OpMIPS64MOVDW)
+               v.reset(OpMIPS64TRUNCDW)
                v.AddArg(x)
                return true
        }
@@ -795,10 +994,10 @@ func rewriteValueMIPS64_OpCvt64Fto64(v *Value, config *Config) bool {
        _ = b
        // match: (Cvt64Fto64 x)
        // cond:
-       // result: (MOVDV x)
+       // result: (TRUNCDV x)
        for {
                x := v.Args[0]
-               v.reset(OpMIPS64MOVDV)
+               v.reset(OpMIPS64TRUNCDV)
                v.AddArg(x)
                return true
        }
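
The four hunks above swap the MOV* float-to-integer conversions for TRUNC* ones. Go requires float-to-integer conversion to truncate toward zero, while the plain convert forms follow the FPU's current rounding mode (round-to-nearest by default), which appears to be the rounding bug the commit message refers to. The required semantics, in Go:

    f := 2.7
    fmt.Println(int32(f), int32(-f)) // prints "2 -2"; round-to-nearest would give 3 and -3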
@@ -844,17 +1043,38 @@ func rewriteValueMIPS64_OpDeferCall(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueMIPS64_OpEq16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpDiv16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq16 x y)
+       // match: (Div16 x y)
        // cond:
-       // result: (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
+       // result: (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64SGTU)
-               v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v1 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpDiv16u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div16u x y)
+       // cond:
+       // result: (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
                v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
                v1.AddArg(x)
                v0.AddArg(v1)
@@ -862,23 +1082,56 @@ func rewriteValueMIPS64_OpEq16(v *Value, config *Config) bool {
                v2.AddArg(y)
                v0.AddArg(v2)
                v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-               v3.AuxInt = 0
-               v.AddArg(v3)
                return true
        }
 }
-func rewriteValueMIPS64_OpEq32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpDiv32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq32 x y)
+       // match: (Div32 x y)
        // cond:
-       // result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
+       // result: (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64SGTU)
-               v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v1 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpDiv32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div32F x y)
+       // cond:
+       // result: (DIVF x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64DIVF)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpDiv32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div32u x y)
+       // cond:
+       // result: (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
                v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
                v1.AddArg(x)
                v0.AddArg(v1)
@@ -886,9 +1139,145 @@ func rewriteValueMIPS64_OpEq32(v *Value, config *Config) bool {
                v2.AddArg(y)
                v0.AddArg(v2)
                v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-               v3.AuxInt = 0
-               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpDiv64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div64 x y)
+       // cond:
+       // result: (Select1 (DIVV x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpDiv64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div64F x y)
+       // cond:
+       // result: (DIVD x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64DIVD)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpDiv64u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div64u x y)
+       // cond:
+       // result: (Select1 (DIVVU x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpDiv8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div8 x y)
+       // cond:
+       // result: (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v1 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpDiv8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div8u x y)
+       // cond:
+       // result: (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
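
The new DivN rules all follow one shape: widen the operands to 64 bits with the sign- or zero-extension matching their type, issue DIVV/DIVVU, and take Select1, the quotient. A rough Go model of that shape, assuming the (remainder, quotient) HI/LO tuple convention these rules imply (divv below is a hypothetical helper for illustration, not a compiler API):

    // divv models DIVV's two results: HI = remainder, LO = quotient.
    func divv(x, y int64) (hi, lo int64) {
            return x % y, x / y
    }

    // Div16 lowering, in spirit: sign-extend, divide, keep the quotient.
    func div16(x, y int16) int16 {
            _, q := divv(int64(x), int64(y))
            return int16(q)
    }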
+func rewriteValueMIPS64_OpEq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq16 x y)
+       // cond:
+       // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SGTU)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 1
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpEq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq32 x y)
+       // cond:
+       // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SGTU)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 1
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v.AddArg(v1)
                return true
        }
 }
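
The Eq rewrites in this region all flip from the old form "(x^y) >u 0", which is actually the inequality test, to "1 >u (x^y)", which is true exactly when x^y is zero; this is one of the comparison mistakes the commit fixes. The identity in plain Go:

    // eq64 mirrors SGTU (MOVVconst [1]) (XOR x y): x^y <u 1 iff x == y.
    func eq64(x, y uint64) bool {
            return 1 > x^y // unsigned compare
    }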
@@ -914,17 +1303,17 @@ func rewriteValueMIPS64_OpEq64(v *Value, config *Config) bool {
        _ = b
        // match: (Eq64 x y)
        // cond:
-       // result: (SGTU (XOR x y) (MOVVconst [0]))
+       // result: (SGTU (MOVVconst [1]) (XOR x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
                v.reset(OpMIPS64SGTU)
-               v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-               v0.AddArg(x)
-               v0.AddArg(y)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 1
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-               v1.AuxInt = 0
+               v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v1.AddArg(y)
                v.AddArg(v1)
                return true
        }
@@ -951,22 +1340,22 @@ func rewriteValueMIPS64_OpEq8(v *Value, config *Config) bool {
        _ = b
        // match: (Eq8 x y)
        // cond:
-       // result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
+       // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
                v.reset(OpMIPS64SGTU)
-               v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 1
                v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-               v3.AuxInt = 0
-               v.AddArg(v3)
+               v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v.AddArg(v1)
                return true
        }
 }
@@ -995,17 +1384,17 @@ func rewriteValueMIPS64_OpEqPtr(v *Value, config *Config) bool {
        _ = b
        // match: (EqPtr x y)
        // cond:
-       // result: (SGTU (XOR x y) (MOVVconst [0]))
+       // result: (SGTU (MOVVconst [1]) (XOR x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
                v.reset(OpMIPS64SGTU)
-               v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-               v0.AddArg(x)
-               v0.AddArg(y)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 1
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-               v1.AuxInt = 0
+               v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v1.AddArg(y)
                v.AddArg(v1)
                return true
        }
@@ -1432,32 +1821,210 @@ func rewriteValueMIPS64_OpGreater8U(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueMIPS64_OpInterCall(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpHmul16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (InterCall [argwid] entry mem)
+       // match: (Hmul16 x y)
        // cond:
-       // result: (CALLinter [argwid] entry mem)
+       // result: (SRAVconst (Select1 <config.fe.TypeInt32()> (MULV (SignExt16to64 x) (SignExt16to64 y))) [16])
        for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpMIPS64CALLinter)
-               v.AuxInt = argwid
-               v.AddArg(entry)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAVconst)
+               v.AuxInt = 16
+               v0 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeInt32())
+               v1 := b.NewValue0(v.Line, OpMIPS64MULV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v2 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueMIPS64_OpIsInBounds(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpHmul16u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (IsInBounds idx len)
+       // match: (Hmul16u x y)
        // cond:
-       // result: (SGTU len idx)
+       // result: (SRLVconst (Select1 <config.fe.TypeUInt32()> (MULVU (ZeroExt16to64 x) (ZeroExt16to64 y))) [16])
        for {
-               idx := v.Args[0]
-               len := v.Args[1]
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRLVconst)
+               v.AuxInt = 16
+               v0 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeUInt32())
+               v1 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpHmul32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul32 x y)
+       // cond:
+       // result: (SRAVconst (Select1 <config.fe.TypeInt64()> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAVconst)
+               v.AuxInt = 32
+               v0 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeInt64())
+               v1 := b.NewValue0(v.Line, OpMIPS64MULV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpHmul32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul32u x y)
+       // cond:
+       // result: (SRLVconst (Select1 <config.fe.TypeUInt64()> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRLVconst)
+               v.AuxInt = 32
+               v0 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeUInt64())
+               v1 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpHmul64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul64 x y)
+       // cond:
+       // result: (Select0 (MULV x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64MULV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpHmul64u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul64u x y)
+       // cond:
+       // result: (Select0 (MULVU x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpHmul8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul8 x y)
+       // cond:
+       // result: (SRAVconst (Select1 <config.fe.TypeInt16()> (MULV (SignExt8to64 x) (SignExt8to64 y))) [8])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAVconst)
+               v.AuxInt = 8
+               v0 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeInt16())
+               v1 := b.NewValue0(v.Line, OpMIPS64MULV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v2 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpHmul8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul8u x y)
+       // cond:
+       // result: (SRLVconst (Select1 <config.fe.TypeUInt16()> (MULVU (ZeroExt8to64 x) (ZeroExt8to64 y))) [8])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRLVconst)
+               v.AuxInt = 8
+               v0 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeUInt16())
+               v1 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               return true
+       }
+}
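
All the HmulN rules reduce "high half of the product" to a full-width multiply of widened operands followed by a shift: for operands narrower than 64 bits the entire product fits in MULV's low result (Select1), so shifting right by the operand width isolates the high half; only Hmul64/Hmul64u need the true HI result (Select0). A Go sketch of the 32-bit case:

    // hmul32 mirrors the Hmul32 rule: sign-extend, multiply in 64 bits,
    // then arithmetic-shift right by 32 to keep the high half.
    func hmul32(x, y int32) int32 {
            return int32((int64(x) * int64(y)) >> 32)
    }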
+func rewriteValueMIPS64_OpInterCall(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (InterCall [argwid] entry mem)
+       // cond:
+       // result: (CALLinter [argwid] entry mem)
+       for {
+               argwid := v.AuxInt
+               entry := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpMIPS64CALLinter)
+               v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpIsInBounds(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (IsInBounds idx len)
+       // cond:
+       // result: (SGTU len idx)
+       for {
+               idx := v.Args[0]
+               len := v.Args[1]
                v.reset(OpMIPS64SGTU)
                v.AddArg(len)
                v.AddArg(idx)
@@ -1742,15 +2309,15 @@ func rewriteValueMIPS64_OpLess16U(v *Value, config *Config) bool {
        _ = b
        // match: (Less16U x y)
        // cond:
-       // result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
+       // result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
        for {
                x := v.Args[0]
                y := v.Args[1]
                v.reset(OpMIPS64SGTU)
-               v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
                v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
                v1.AddArg(x)
                v.AddArg(v1)
                return true
@@ -1882,15 +2449,15 @@ func rewriteValueMIPS64_OpLess8U(v *Value, config *Config) bool {
        _ = b
        // match: (Less8U x y)
        // cond:
-       // result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
+       // result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
        for {
                x := v.Args[0]
                y := v.Args[1]
                v.reset(OpMIPS64SGTU)
-               v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
                v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
                v1.AddArg(x)
                v.AddArg(v1)
                return true
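
These two hunks are part of the comparison fixes: Less16U and Less8U previously zero-extended their operands with ZeroExt32to64, an extension whose input width does not match the operands; now each operand is widened at its own width before the SGTU. The intended semantics, in Go terms:

    // less8u mirrors the corrected rule: widen each uint8 at its own
    // width, then compare unsigned (SGTU y' x' computes x < y).
    func less8u(x, y uint8) bool {
            return uint64(y) > uint64(x)
    }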
@@ -2051,377 +2618,2558 @@ func rewriteValueMIPS64_OpLoad(v *Value, config *Config) bool {
        }
        return false
 }
-func rewriteValueMIPS64_OpNeg16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh16x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg16 x)
+       // match: (Lsh16x16 <t> x y)
        // cond:
-       // result: (NEGV x)
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
        for {
+               t := v.Type
                x := v.Args[0]
-               v.reset(OpMIPS64NEGV)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeg32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh16x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg32 x)
+       // match: (Lsh16x32 <t> x y)
        // cond:
-       // result: (NEGV x)
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
        for {
+               t := v.Type
                x := v.Args[0]
-               v.reset(OpMIPS64NEGV)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeg32F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh16x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg32F x)
+       // match: (Lsh16x64 <t> x y)
        // cond:
-       // result: (NEGF x)
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
        for {
+               t := v.Type
                x := v.Args[0]
-               v.reset(OpMIPS64NEGF)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v3.AddArg(x)
+               v3.AddArg(y)
+               v.AddArg(v3)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeg64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh16x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg64 x)
+       // match: (Lsh16x8  <t> x y)
        // cond:
-       // result: (NEGV x)
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SLLV <t> x (ZeroExt8to64  y)))
        for {
+               t := v.Type
                x := v.Args[0]
-               v.reset(OpMIPS64NEGV)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeg64F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh32x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg64F x)
+       // match: (Lsh32x16 <t> x y)
        // cond:
-       // result: (NEGD x)
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
        for {
+               t := v.Type
                x := v.Args[0]
-               v.reset(OpMIPS64NEGD)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeg8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh32x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg8 x)
+       // match: (Lsh32x32 <t> x y)
        // cond:
-       // result: (NEGV x)
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
        for {
+               t := v.Type
                x := v.Args[0]
-               v.reset(OpMIPS64NEGV)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeq16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh32x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq16 x y)
+       // match: (Lsh32x64 <t> x y)
        // cond:
-       // result: (SGTU (MOVVconst [0]) (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)))
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64SGTU)
-               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(x)
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
                v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v3.AddArg(x)
                v3.AddArg(y)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               v.AddArg(v3)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeq32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh32x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq32 x y)
+       // match: (Lsh32x8  <t> x y)
        // cond:
-       // result: (SGTU (MOVVconst [0]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) )
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SLLV <t> x (ZeroExt8to64  y)))
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64SGTU)
-               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpLsh64x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh64x16 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpLsh64x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh64x32 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpLsh64x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh64x64 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v3.AddArg(x)
+               v3.AddArg(y)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpLsh64x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh64x8  <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SLLV <t> x (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpLsh8x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x16 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpLsh8x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x32 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpLsh8x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x64 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v3.AddArg(x)
+               v3.AddArg(y)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpLsh8x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x8  <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SLLV <t> x (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
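
Every Lsh rule above has the same two halves: an SLLV that performs the hardware shift, which in effect consults only the low six bits of the count, and an AND against NEGV(SGTU(64, count)), a mask that is all ones when the count is below 64 and zero otherwise. The AND is what provides Go's required "shift by >= width yields 0" behavior. A Go model of Lsh64x64 under that reading:

    // lsh64x64: hardware-style shift plus the rule's saturation mask.
    func lsh64x64(x, y uint64) uint64 {
            var mask uint64
            if 64 > y { // SGTU (Const64 [64]) y
                    mask = ^uint64(0) // NEGV of 1
            }
            return (x << (y & 63)) & mask // SLLV uses the count mod 64
    }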
+func rewriteValueMIPS64_OpMod16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16 x y)
+       // cond:
+       // result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v1 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod16u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16u x y)
+       // cond:
+       // result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32 x y)
+       // cond:
+       // result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v1 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32u x y)
+       // cond:
+       // result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
                v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v2.AddArg(x)
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod64 x y)
+       // cond:
+       // result: (Select0 (DIVV x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod64u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod64u x y)
+       // cond:
+       // result: (Select0 (DIVVU x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8 x y)
+       // cond:
+       // result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v1 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8u x y)
+       // cond:
+       // result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
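
The ModN rules are the mirror image of the DivN rules earlier in the file: the same widened DIVV/DIVVU, but taking Select0, the remainder half of the tuple, instead of the quotient. In Go terms:

    // mod32u mirrors the Mod32u rule: zero-extend, divide, keep the
    // remainder (Select0, the HI result).
    func mod32u(x, y uint32) uint32 {
            return uint32(uint64(x) % uint64(y))
    }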
+func rewriteValueMIPS64_OpMove(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Move [s] _ _ mem)
+       // cond: SizeAndAlign(s).Size() == 0
+       // result: mem
+       for {
+               s := v.AuxInt
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 0) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = mem.Type
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 1
+       // result: (MOVBstore dst (MOVBload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 1) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore dst (MOVHload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2
+       // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 2) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = 1
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
+               v0.AuxInt = 1
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore dst (MOVWload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVWload, config.fe.TypeInt32())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstore)
+               v.AuxInt = 2
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
+               v0.AuxInt = 2
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4
+       // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = 3
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
+               v0.AuxInt = 3
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
+               v1.AuxInt = 2
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
+               v2.AuxInt = 2
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
+               v3.AuxInt = 1
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
+               v4.AuxInt = 1
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
+               v5.AddArg(dst)
+               v6 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
+               v6.AddArg(src)
+               v6.AddArg(mem)
+               v5.AddArg(v6)
+               v5.AddArg(mem)
+               v3.AddArg(v5)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
+       // result: (MOVVstore dst (MOVVload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVload, config.fe.TypeUInt64())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWstore)
+               v.AuxInt = 4
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVWload, config.fe.TypeInt32())
+               v0.AuxInt = 4
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVWstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVWload, config.fe.TypeInt32())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstore)
+               v.AuxInt = 6
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
+               v0.AuxInt = 6
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
+               v1.AuxInt = 4
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
+               v2.AuxInt = 4
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
+               v3.AuxInt = 2
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
+               v4.AuxInt = 2
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
+               v5.AddArg(dst)
+               v6 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
+               v6.AddArg(src)
+               v6.AddArg(mem)
+               v5.AddArg(v6)
+               v5.AddArg(mem)
+               v3.AddArg(v5)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 3
+       // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 3) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = 2
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
+               v0.AuxInt = 2
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
+               v1.AuxInt = 1
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
+               v2.AuxInt = 1
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstore)
+               v.AuxInt = 4
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
+               v0.AuxInt = 4
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
+               v1.AuxInt = 2
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
+               v2.AuxInt = 2
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWstore)
+               v.AuxInt = 8
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVWload, config.fe.TypeInt32())
+               v0.AuxInt = 8
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVWstore, TypeMem)
+               v1.AuxInt = 4
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVWload, config.fe.TypeInt32())
+               v2.AuxInt = 4
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVWstore, TypeMem)
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpMIPS64MOVWload, config.fe.TypeInt32())
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
+       // result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVstore)
+               v.AuxInt = 8
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVload, config.fe.TypeUInt64())
+               v0.AuxInt = 8
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVVstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVVload, config.fe.TypeUInt64())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
+       // result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVstore)
+               v.AuxInt = 16
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVload, config.fe.TypeUInt64())
+               v0.AuxInt = 16
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVVstore, TypeMem)
+               v1.AuxInt = 8
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVVload, config.fe.TypeUInt64())
+               v2.AuxInt = 8
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVVstore, TypeMem)
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpMIPS64MOVVload, config.fe.TypeUInt64())
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() > 24 || SizeAndAlign(s).Align()%8 != 0
+       // result: (LoweredMove [SizeAndAlign(s).Align()] dst src (ADDVconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() > 24 || SizeAndAlign(s).Align()%8 != 0) {
+                       break
+               }
+               v.reset(OpMIPS64LoweredMove)
+               v.AuxInt = SizeAndAlign(s).Align()
+               v.AddArg(dst)
+               v.AddArg(src)
+               v0 := b.NewValue0(v.Line, OpMIPS64ADDVconst, src.Type)
+               v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+               v0.AddArg(src)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
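+// Mul is lowered to MULVU, which leaves a (hi, lo) tuple in the HI/LO
+// registers; Select1 (LO) is the low 64 bits of the product, which is all
+// Go multiplication needs regardless of operand width or signedness.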
+func rewriteValueMIPS64_OpMul16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul16 x y)
+       // cond:
+       // result: (Select1 (MULVU x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMul32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32 x y)
+       // cond:
+       // result: (Select1 (MULVU x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMul32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32F x y)
+       // cond:
+       // result: (MULF x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64MULF)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMul64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul64 x y)
+       // cond:
+       // result: (Select1 (MULVU x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMul64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul64F x y)
+       // cond:
+       // result: (MULD x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64MULD)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMul8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul8 x y)
+       // cond:
+       // result: (Select1 (MULVU x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeg16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg16 x)
+       // cond:
+       // result: (NEGV x)
+       for {
+               x := v.Args[0]
+               v.reset(OpMIPS64NEGV)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeg32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32 x)
+       // cond:
+       // result: (NEGV x)
+       for {
+               x := v.Args[0]
+               v.reset(OpMIPS64NEGV)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeg32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32F x)
+       // cond:
+       // result: (NEGF x)
+       for {
+               x := v.Args[0]
+               v.reset(OpMIPS64NEGF)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeg64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg64 x)
+       // cond:
+       // result: (NEGV x)
+       for {
+               x := v.Args[0]
+               v.reset(OpMIPS64NEGV)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeg64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg64F x)
+       // cond:
+       // result: (NEGD x)
+       for {
+               x := v.Args[0]
+               v.reset(OpMIPS64NEGD)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeg8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg8 x)
+       // cond:
+       // result: (NEGV x)
+       for {
+               x := v.Args[0]
+               v.reset(OpMIPS64NEGV)
+               v.AddArg(x)
+               return true
+       }
+}
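+// Neq is lowered to (SGTU (XOR x y) (MOVVconst [0])): the XOR is nonzero
+// exactly when the operands differ, and SGTU yields 1 when it is
+// unsigned-greater than zero. Operand order matters here: the old rules,
+// removed below, computed (SGTU (MOVVconst [0]) (XOR x y)), i.e. 0 > x^y,
+// which is never true; this is one of the comparison mistakes this change
+// fixes.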
+func rewriteValueMIPS64_OpNeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq16 x y)
+       // cond:
+       // result: (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SGTU)
+               v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v3.AuxInt = 0
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq32 x y)
+       // cond:
+       // result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SGTU)
+               v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v3.AuxInt = 0
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq32F x y)
+       // cond:
+       // result: (FPFlagFalse (CMPEQF x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64FPFlagFalse)
+               v0 := b.NewValue0(v.Line, OpMIPS64CMPEQF, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeq64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq64 x y)
+       // cond:
+       // result: (SGTU (XOR x y) (MOVVconst [0]))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SGTU)
+               v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v1.AuxInt = 0
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq64F x y)
+       // cond:
+       // result: (FPFlagFalse (CMPEQD x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64FPFlagFalse)
+               v0 := b.NewValue0(v.Line, OpMIPS64CMPEQD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq8 x y)
+       // cond:
+       // result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SGTU)
+               v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v3.AuxInt = 0
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeqB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NeqB x y)
+       // cond:
+       // result: (XOR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64XOR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNeqPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NeqPtr x y)
+       // cond:
+       // result: (SGTU (XOR x y) (MOVVconst [0]))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SGTU)
+               v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v1.AuxInt = 0
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpNilCheck(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NilCheck ptr mem)
+       // cond:
+       // result: (LoweredNilCheck ptr mem)
+       for {
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpMIPS64LoweredNilCheck)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+}
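+// Not is lowered to (XOR (MOVVconst [1]) x): booleans are materialized as
+// 0 or 1, so XORing with 1 flips the value.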
+func rewriteValueMIPS64_OpNot(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Not x)
+       // cond:
+       // result: (XOR (MOVVconst [1]) x)
+       for {
+               x := v.Args[0]
+               v.reset(OpMIPS64XOR)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 1
+               v.AddArg(v0)
+               v.AddArg(x)
+               return true
+       }
+}
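+// OffPtr keeps stack addresses recognizable by folding the offset into a
+// MOVVaddr when the base is SP; any other pointer simply gets an ADDVconst.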
+func rewriteValueMIPS64_OpOffPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OffPtr [off] ptr:(SP))
+       // cond:
+       // result: (MOVVaddr [off] ptr)
+       for {
+               off := v.AuxInt
+               ptr := v.Args[0]
+               if ptr.Op != OpSP {
+                       break
+               }
+               v.reset(OpMIPS64MOVVaddr)
+               v.AuxInt = off
+               v.AddArg(ptr)
+               return true
+       }
+       // match: (OffPtr [off] ptr)
+       // cond:
+       // result: (ADDVconst [off] ptr)
+       for {
+               off := v.AuxInt
+               ptr := v.Args[0]
+               v.reset(OpMIPS64ADDVconst)
+               v.AuxInt = off
+               v.AddArg(ptr)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpOr16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or16 x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64OR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpOr32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or32 x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64OR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpOr64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or64 x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64OR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpOr8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or8 x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64OR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpOrB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OrB x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64OR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
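+// Unsigned right shifts must produce 0 once the shift count reaches the
+// operand width, but SRLV only consults the low six bits of the count.
+// The lowering therefore ANDs the shifted value with
+// (NEGV (SGTU (Const64 [64]) y)), a mask that is all ones when y < 64 and
+// zero otherwise, so oversized counts yield 0.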
+func rewriteValueMIPS64_OpRsh16Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux16 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(x)
+               v4.AddArg(v5)
+               v6 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v4.AddArg(v6)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh16Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux32 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(x)
+               v4.AddArg(v5)
+               v6 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v4.AddArg(v6)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh16Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux64 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v4.AddArg(x)
+               v3.AddArg(v4)
+               v3.AddArg(y)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh16Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux8  <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(x)
+               v4.AddArg(v5)
+               v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v4.AddArg(v6)
+               v.AddArg(v4)
+               return true
+       }
+}
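+// Signed right shifts clamp the count instead: (OR (NEGV (SGTU y 63)) y)
+// leaves y unchanged when y <= 63 and becomes all ones (low bits 63) when
+// y > 63, so SRAV shifts by at most 63 and an oversized count still smears
+// the sign bit across the whole result.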
+func rewriteValueMIPS64_OpRsh16x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x16 <t> x y)
+       // cond:
+       // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v5.AuxInt = 63
+               v3.AddArg(v5)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v6 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v1.AddArg(v6)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh16x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x32 <t> x y)
+       // cond:
+       // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v5.AuxInt = 63
+               v3.AddArg(v5)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v6 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v1.AddArg(v6)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh16x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x64 <t> x y)
+       // cond:
+       // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v3.AddArg(y)
+               v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v4.AuxInt = 63
+               v3.AddArg(v4)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh16x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x8  <t> x y)
+       // cond:
+       // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v5.AuxInt = 63
+               v3.AddArg(v5)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v1.AddArg(v6)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh32Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux16 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(x)
+               v4.AddArg(v5)
+               v6 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v4.AddArg(v6)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh32Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux32 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(x)
+               v4.AddArg(v5)
+               v6 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v4.AddArg(v6)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh32Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux64 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v4.AddArg(x)
+               v3.AddArg(v4)
+               v3.AddArg(y)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh32Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux8  <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(x)
+               v4.AddArg(v5)
+               v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v4.AddArg(v6)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh32x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32x16 <t> x y)
+       // cond:
+       // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v5.AuxInt = 63
+               v3.AddArg(v5)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v6 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v1.AddArg(v6)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh32x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32x32 <t> x y)
+       // cond:
+       // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v5.AuxInt = 63
+               v3.AddArg(v5)
+               v2.AddArg(v3)
                v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v3.AddArg(y)
-               v1.AddArg(v3)
+               v6 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v1.AddArg(v6)
                v.AddArg(v1)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeq32F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh32x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq32F x y)
+       // match: (Rsh32x64 <t> x y)
        // cond:
-       // result: (FPFlagFalse (CMPEQF x y))
+       // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64FPFlagFalse)
-               v0 := b.NewValue0(v.Line, OpMIPS64CMPEQF, TypeFlags)
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
                v0.AddArg(x)
-               v0.AddArg(y)
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v3.AddArg(y)
+               v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v4.AuxInt = 63
+               v3.AddArg(v4)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeq64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh32x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq64 x y)
+       // match: (Rsh32x8  <t> x y)
        // cond:
-       // result: (SGTU (MOVVconst [0]) (XOR x y))
+       // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64SGTU)
-               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v0.AddArg(x)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v1.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v5.AuxInt = 63
+               v3.AddArg(v5)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v1.AddArg(v6)
                v.AddArg(v1)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeq64F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64Ux16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq64F x y)
+       // match: (Rsh64Ux16 <t> x y)
        // cond:
-       // result: (FPFlagFalse (CMPEQD x y))
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64FPFlagFalse)
-               v0 := b.NewValue0(v.Line, OpMIPS64CMPEQD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
                v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeq8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64Ux32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq8 x y)
+       // match: (Rsh64Ux32 <t> x y)
        // cond:
-       // result: (SGTU (MOVVconst [0]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64SGTU)
-               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v2.AddArg(x)
+               v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh64Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64Ux64 <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> x y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v3.AddArg(x)
+               v3.AddArg(y)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh64Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64Ux8  <t> x y)
+       // cond:
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SRLV <t> x (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
                v1.AddArg(v2)
                v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
                v3.AddArg(y)
                v1.AddArg(v3)
-               v.AddArg(v1)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v4.AddArg(x)
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeqB(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NeqB x y)
+       // match: (Rsh64x16 <t> x y)
        // cond:
-       // result: (XOR x y)
+       // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
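+       // Signed shifts fill with the sign bit, and Go defines
+       // x>>y == x>>63 for y >= 64.  SGTU(y', 63) is 1 iff y' > 63; ORing
+       // its NEGV mask into the shift amount clamps the low six bits SRAV
+       // reads to 63, which produces exactly that sign fill.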
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64XOR)
+               v.reset(OpMIPS64SRAV)
                v.AddArg(x)
-               v.AddArg(y)
+               v0 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v4.AuxInt = 63
+               v2.AddArg(v4)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v0.AddArg(v5)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueMIPS64_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NeqPtr x y)
+       // match: (Rsh64x32 <t> x y)
        // cond:
-       // result: (SGTU (MOVVconst [0]) (XOR x y))
+       // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64SGTU)
-               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               v.reset(OpMIPS64SRAV)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v4.AuxInt = 63
+               v2.AddArg(v4)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v0.AddArg(v5)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v1.AddArg(y)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueMIPS64_OpNilCheck(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NilCheck ptr mem)
+       // match: (Rsh64x64 <t> x y)
        // cond:
-       // result: (LoweredNilCheck ptr mem)
+       // result: (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
        for {
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpMIPS64LoweredNilCheck)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAV)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2.AddArg(y)
+               v3 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v3.AuxInt = 63
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueMIPS64_OpNot(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Not x)
+       // match: (Rsh64x8  <t> x y)
        // cond:
-       // result: (XOR (MOVVconst [1]) x)
+       // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
        for {
+               t := v.Type
                x := v.Args[0]
-               v.reset(OpMIPS64XOR)
-               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-               v0.AuxInt = 1
-               v.AddArg(v0)
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAV)
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v4.AuxInt = 63
+               v2.AddArg(v4)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v0.AddArg(v5)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueMIPS64_OpOffPtr(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8Ux16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (OffPtr [off] ptr:(SP))
+       // match: (Rsh8Ux16 <t> x y)
        // cond:
-       // result: (MOVVaddr [off] ptr)
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
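+       // The 8-bit x is zero-extended before the logical shift so stale
+       // high bits of the 64-bit register cannot shift into the result.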
        for {
-               off := v.AuxInt
-               ptr := v.Args[0]
-               if ptr.Op != OpSP {
-                       break
-               }
-               v.reset(OpMIPS64MOVVaddr)
-               v.AuxInt = off
-               v.AddArg(ptr)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(x)
+               v4.AddArg(v5)
+               v6 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v4.AddArg(v6)
+               v.AddArg(v4)
                return true
        }
-       // match: (OffPtr [off] ptr)
+}
+func rewriteValueMIPS64_OpRsh8Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux32 <t> x y)
        // cond:
-       // result: (ADDVconst [off] ptr)
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
        for {
-               off := v.AuxInt
-               ptr := v.Args[0]
-               v.reset(OpMIPS64ADDVconst)
-               v.AuxInt = off
-               v.AddArg(ptr)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(x)
+               v4.AddArg(v5)
+               v6 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v4.AddArg(v6)
+               v.AddArg(v4)
                return true
        }
 }
-func rewriteValueMIPS64_OpOr16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8Ux64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or16 x y)
+       // match: (Rsh8Ux64 <t> x y)
        // cond:
-       // result: (OR x y)
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64OR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(x)
+               v3.AddArg(v4)
+               v3.AddArg(y)
+               v.AddArg(v3)
                return true
        }
 }
-func rewriteValueMIPS64_OpOr32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8Ux8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or32 x y)
+       // match: (Rsh8Ux8  <t> x y)
        // cond:
-       // result: (OR x y)
+       // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64  y)))
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64OR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpMIPS64AND)
+               v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v2.AuxInt = 64
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(x)
+               v4.AddArg(v5)
+               v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v4.AddArg(v6)
+               v.AddArg(v4)
                return true
        }
 }
-func rewriteValueMIPS64_OpOr64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or64 x y)
+       // match: (Rsh8x16 <t> x y)
        // cond:
-       // result: (OR x y)
+       // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
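+       // For the signed forms x is sign-extended instead, so SRAV sees the
+       // 8-bit value's real sign bit and replicates it across the register.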
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64OR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v5.AuxInt = 63
+               v3.AddArg(v5)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v6 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v1.AddArg(v6)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueMIPS64_OpOr8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or8 x y)
+       // match: (Rsh8x32 <t> x y)
        // cond:
-       // result: (OR x y)
+       // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64OR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v5.AuxInt = 63
+               v3.AddArg(v5)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v6 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v1.AddArg(v6)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh8x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x64 <t> x y)
+       // cond:
+       // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v3.AddArg(y)
+               v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v4.AuxInt = 63
+               v3.AddArg(v4)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueMIPS64_OpOrB(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (OrB x y)
+       // match: (Rsh8x8  <t> x y)
        // cond:
-       // result: (OR x y)
+       // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpMIPS64OR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v5.AuxInt = 63
+               v3.AddArg(v5)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v1.AddArg(v6)
+               v.AddArg(v1)
                return true
        }
 }
@@ -2882,6 +5630,449 @@ func rewriteValueMIPS64_OpXor8(v *Value, config *Config) bool {
                return true
        }
 }
+func rewriteValueMIPS64_OpZero(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
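+       // Zero is lowered by size and alignment: small or unaligned blocks
+       // become unrolled stores, mid-sized 8-byte-aligned blocks jump into
+       // Duff's device, and everything else falls through to the
+       // LoweredZero loop in the last rule below.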
+       // match: (Zero [s] _ mem)
+       // cond: SizeAndAlign(s).Size() == 0
+       // result: mem
+       for {
+               s := v.AuxInt
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 0) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = mem.Type
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 1
+       // result: (MOVBstore ptr (MOVVconst [0]) mem)
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 1) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstore)
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore ptr (MOVVconst [0]) mem)
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstore)
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 2
+       // result: (MOVBstore [1] ptr (MOVVconst [0])           (MOVBstore [0] ptr (MOVVconst [0]) mem))
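+       // Without the %2 alignment guarantee a halfword store cannot be
+       // used (MIPS64 stores must be naturally aligned), so the two bytes
+       // are written individually.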
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 2) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = 1
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
+               v1.AuxInt = 0
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore ptr (MOVVconst [0]) mem)
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWstore)
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [2] ptr (MOVVconst [0])           (MOVHstore [0] ptr (MOVVconst [0]) mem))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstore)
+               v.AuxInt = 2
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
+               v1.AuxInt = 0
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 4
+       // result: (MOVBstore [3] ptr (MOVVconst [0])           (MOVBstore [2] ptr (MOVVconst [0])                      (MOVBstore [1] ptr (MOVVconst [0])                              (MOVBstore [0] ptr (MOVVconst [0]) mem))))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 4) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = 3
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
+               v1.AuxInt = 2
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
+               v3.AuxInt = 1
+               v3.AddArg(ptr)
+               v4 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v4.AuxInt = 0
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
+               v5.AuxInt = 0
+               v5.AddArg(ptr)
+               v6 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v6.AuxInt = 0
+               v5.AddArg(v6)
+               v5.AddArg(mem)
+               v3.AddArg(v5)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
+       // result: (MOVVstore ptr (MOVVconst [0]) mem)
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVstore)
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore [4] ptr (MOVVconst [0])           (MOVWstore [0] ptr (MOVVconst [0]) mem))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWstore)
+               v.AuxInt = 4
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVWstore, TypeMem)
+               v1.AuxInt = 0
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [6] ptr (MOVVconst [0])           (MOVHstore [4] ptr (MOVVconst [0])                      (MOVHstore [2] ptr (MOVVconst [0])                              (MOVHstore [0] ptr (MOVVconst [0]) mem))))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstore)
+               v.AuxInt = 6
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
+               v1.AuxInt = 4
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
+               v3.AuxInt = 2
+               v3.AddArg(ptr)
+               v4 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v4.AuxInt = 0
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
+               v5.AuxInt = 0
+               v5.AddArg(ptr)
+               v6 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v6.AuxInt = 0
+               v5.AddArg(v6)
+               v5.AddArg(mem)
+               v3.AddArg(v5)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 3
+       // result: (MOVBstore [2] ptr (MOVVconst [0])           (MOVBstore [1] ptr (MOVVconst [0])                      (MOVBstore [0] ptr (MOVVconst [0]) mem)))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 3) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = 2
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
+               v1.AuxInt = 1
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
+               v3.AuxInt = 0
+               v3.AddArg(ptr)
+               v4 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v4.AuxInt = 0
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [4] ptr (MOVVconst [0])           (MOVHstore [2] ptr (MOVVconst [0])                      (MOVHstore [0] ptr (MOVVconst [0]) mem)))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstore)
+               v.AuxInt = 4
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
+               v1.AuxInt = 2
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
+               v3.AuxInt = 0
+               v3.AddArg(ptr)
+               v4 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v4.AuxInt = 0
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore [8] ptr (MOVVconst [0])           (MOVWstore [4] ptr (MOVVconst [0])                      (MOVWstore [0] ptr (MOVVconst [0]) mem)))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWstore)
+               v.AuxInt = 8
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVWstore, TypeMem)
+               v1.AuxInt = 4
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVWstore, TypeMem)
+               v3.AuxInt = 0
+               v3.AddArg(ptr)
+               v4 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v4.AuxInt = 0
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
+       // result: (MOVVstore [8] ptr (MOVVconst [0])           (MOVVstore [0] ptr (MOVVconst [0]) mem))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVstore)
+               v.AuxInt = 8
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVVstore, TypeMem)
+               v1.AuxInt = 0
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
+       // result: (MOVVstore [16] ptr (MOVVconst [0])          (MOVVstore [8] ptr (MOVVconst [0])                      (MOVVstore [0] ptr (MOVVconst [0]) mem)))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVstore)
+               v.AuxInt = 16
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64MOVVstore, TypeMem)
+               v1.AuxInt = 8
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpMIPS64MOVVstore, TypeMem)
+               v3.AuxInt = 0
+               v3.AddArg(ptr)
+               v4 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+               v4.AuxInt = 0
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128        && SizeAndAlign(s).Align()%8 == 0 && !config.noDuffDevice
+       // result: (DUFFZERO [8 * (128 - int64(SizeAndAlign(s).Size()/8))] ptr mem)
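+       // The AuxInt is the entry offset into runtime.duffzero: an n-byte
+       // block needs only the last n/8 of the 128 unrolled steps, and each
+       // step is 8 bytes of instruction text, hence 8 * (128 - n/8).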
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128 && SizeAndAlign(s).Align()%8 == 0 && !config.noDuffDevice) {
+                       break
+               }
+               v.reset(OpMIPS64DUFFZERO)
+               v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/8))
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: (SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0
+       // result: (LoweredZero [SizeAndAlign(s).Align()]               ptr             (ADDVconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])           mem)
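+       // The fallback loop takes the alignment in AuxInt plus a pointer to
+       // the last unit to clear: ptr + size - moveSize(align), where
+       // moveSize picks the widest store the alignment allows.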
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !((SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0) {
+                       break
+               }
+               v.reset(OpMIPS64LoweredZero)
+               v.AuxInt = SizeAndAlign(s).Align()
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpMIPS64ADDVconst, ptr.Type)
+               v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+               v0.AddArg(ptr)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
 func rewriteValueMIPS64_OpZeroExt16to32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
index 86f1461c42b58a9a529c4625f75e768912daf06b..bf3fb27f905e794112ef50f0c7eec8c860729917 100644 (file)
@@ -84,7 +84,7 @@ func schedule(f *Func) {
                // Compute score. Larger numbers are scheduled closer to the end of the block.
                for _, v := range b.Values {
                        switch {
-                       case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr || v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr || v.Op == Op386LoweredGetClosurePtr:
+                       case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr || v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr || v.Op == Op386LoweredGetClosurePtr || v.Op == OpMIPS64LoweredGetClosurePtr:
                                // We also score LoweredGetClosurePtr as early as possible to ensure that the
                                // context register is not stomped. LoweredGetClosurePtr should only appear
                                // in the entry block where there are no phi functions, so there is no
index 6f2c5bbbfcdc26ce12d2bba4a1b83cb9d4d1aee4..5a6474c92fc79bbb47b939434d0c4cdc66f6a6aa 100644 (file)
@@ -400,19 +400,23 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
                                break
                        }
 
-                       if p.To.Sym != nil { // retjmp
-                               p.As = AJMP
-                               p.To.Type = obj.TYPE_BRANCH
-                               break
-                       }
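+                       // A RET carrying a target symbol is a RetJmp: return via
+                       // a direct jump to that symbol instead of through LR.
+                       // Save the symbol, then clear p.To so p can be rebuilt
+                       // below as either form of epilogue.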
+                       retSym := p.To.Sym
+                       p.To.Name = obj.NAME_NONE // clear fields, as p may be rewritten into another instruction below
+                       p.To.Sym = nil
 
                        if cursym.Text.Mark&LEAF != 0 {
                                if autosize == 0 {
                                        p.As = AJMP
                                        p.From = obj.Addr{}
-                                       p.To.Type = obj.TYPE_MEM
-                                       p.To.Offset = 0
-                                       p.To.Reg = REGLINK
+                                       if retSym != nil { // retjmp
+                                               p.To.Type = obj.TYPE_BRANCH
+                                               p.To.Name = obj.NAME_EXTERN
+                                               p.To.Sym = retSym
+                                       } else {
+                                               p.To.Type = obj.TYPE_MEM
+                                               p.To.Reg = REGLINK
+                                               p.To.Offset = 0
+                                       }
                                        p.Mark |= BRANCH
                                        break
                                }
@@ -444,22 +448,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
                        p.From.Reg = REGSP
                        p.To.Type = obj.TYPE_REG
                        p.To.Reg = REG_R4
-
-                       if false {
-                               // Debug bad returns
-                               q = ctxt.NewProg()
-
-                               q.As = AMOVV
-                               q.Lineno = p.Lineno
-                               q.From.Type = obj.TYPE_MEM
-                               q.From.Offset = 0
-                               q.From.Reg = REG_R4
-                               q.To.Type = obj.TYPE_REG
-                               q.To.Reg = REGTMP
-
-                               q.Link = p.Link
-                               p.Link = q
-                               p = q
+                       if retSym != nil { // retjmp from a non-leaf function: restore the saved return address into LINK instead
+                               p.To.Reg = REGLINK
                        }
 
                        if autosize != 0 {
@@ -479,9 +469,15 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
                        q1 = ctxt.NewProg()
                        q1.As = AJMP
                        q1.Lineno = p.Lineno
-                       q1.To.Type = obj.TYPE_MEM
-                       q1.To.Offset = 0
-                       q1.To.Reg = REG_R4
+                       if retSym != nil { // retjmp
+                               q1.To.Type = obj.TYPE_BRANCH
+                               q1.To.Name = obj.NAME_EXTERN
+                               q1.To.Sym = retSym
+                       } else {
+                               q1.To.Type = obj.TYPE_MEM
+                               q1.To.Offset = 0
+                               q1.To.Reg = REG_R4
+                       }
                        q1.Mark |= BRANCH
                        q1.Spadj = +autosize