Cypherpunks repositories - gostls13.git/commitdiff
cmd/compile: add MIPS64 optimizations, SSA on by default
author    Cherry Zhang <cherryyz@google.com>
          Thu, 25 Aug 2016 19:52:45 +0000 (15:52 -0400)
committer Cherry Zhang <cherryyz@google.com>
          Fri, 26 Aug 2016 19:45:06 +0000 (19:45 +0000)
Add the following optimizations:
- fold constants
- fold address into load/store
- simplify extensions and conditional branches
- remove nil checks

Turn on SSA by default on MIPS64, and update the tests' build tags accordingly.

Fixes #16359.

Change-Id: I7f1e38c2509e22e42cd024e712990ebbe47176bd
Reviewed-on: https://go-review.googlesource.com/27870
Run-TryBot: Cherry Zhang <cherryyz@google.com>
Reviewed-by: David Chase <drchase@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>

src/cmd/compile/internal/gc/ssa.go
src/cmd/compile/internal/mips64/ssa.go
src/cmd/compile/internal/ssa/gen/MIPS64.rules
src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
src/cmd/compile/internal/ssa/opGen.go
src/cmd/compile/internal/ssa/rewriteMIPS64.go
test/live.go
test/live_ssa.go
test/nilptr3_ssa.go
test/sliceopt.go

src/cmd/compile/internal/gc/ssa.go
index 9dcdb661cd0b842d8f621224573592434eacd084..01701d99f2054bf549a267fdf1ddbe0455dc6cf0 100644 (file)
@@ -40,7 +40,7 @@ func shouldssa(fn *Node) bool {
                if os.Getenv("SSATEST") == "" {
                        return false
                }
-       case "amd64", "amd64p32", "arm", "386", "arm64", "ppc64le":
+       case "amd64", "amd64p32", "arm", "386", "arm64", "ppc64le", "mips64", "mips64le":
                // Generally available.
        }
        if !ssaEnabled {
src/cmd/compile/internal/mips64/ssa.go
index ca859d6c5c08ffda518365c349feb04faa995f8d..da30ddc6cff91bbae44ae90312029586b0307b4c 100644 (file)
@@ -421,7 +421,32 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                ssa.OpMIPS64MOVHUreg,
                ssa.OpMIPS64MOVWreg,
                ssa.OpMIPS64MOVWUreg:
-               // TODO: remove extension if after proper load
+               a := v.Args[0]
+               for a.Op == ssa.OpCopy || a.Op == ssa.OpMIPS64MOVVreg {
+                       a = a.Args[0]
+               }
+               if a.Op == ssa.OpLoadReg {
+                       t := a.Type
+                       switch {
+                       case v.Op == ssa.OpMIPS64MOVBreg && t.Size() == 1 && t.IsSigned(),
+                               v.Op == ssa.OpMIPS64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+                               v.Op == ssa.OpMIPS64MOVHreg && t.Size() == 2 && t.IsSigned(),
+                               v.Op == ssa.OpMIPS64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+                               v.Op == ssa.OpMIPS64MOVWreg && t.Size() == 4 && t.IsSigned(),
+                               v.Op == ssa.OpMIPS64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+                               // arg is a properly typed load, already zero/sign-extended; don't extend again
+                               if gc.SSARegNum(v) == gc.SSARegNum(v.Args[0]) {
+                                       return
+                               }
+                               p := gc.Prog(mips.AMOVV)
+                               p.From.Type = obj.TYPE_REG
+                               p.From.Reg = gc.SSARegNum(v.Args[0])
+                               p.To.Type = obj.TYPE_REG
+                               p.To.Reg = gc.SSARegNum(v)
+                               return
+                       default:
+                       }
+               }
                fallthrough
        case ssa.OpMIPS64MOVWF,
                ssa.OpMIPS64MOVWD,
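For illustration, a minimal Go sketch of a case this elision covers (the function is hypothetical, not part of the CL):

package p

// loadByte: on MIPS64 the MOVB load of *p already sign-extends to 64
// bits, so the MOVBreg emitted for the int64 conversion matches the
// load's type and ssaGenValue skips the second extension; if source and
// destination registers coincide it emits nothing at all.
func loadByte(p *int8) int64 {
	return int64(*p)
}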
@@ -613,7 +638,64 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                        gc.Maxarg = v.AuxInt
                }
        case ssa.OpMIPS64LoweredNilCheck:
-               // TODO: optimization
+               // Optimization - if the subsequent block has a load or store
+               // at the same address, we don't need to issue this instruction.
+               mem := v.Args[1]
+               for _, w := range v.Block.Succs[0].Block().Values {
+                       if w.Op == ssa.OpPhi {
+                               if w.Type.IsMemory() {
+                                       mem = w
+                               }
+                               continue
+                       }
+                       if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
+                               // w doesn't use a store - can't be a memory op.
+                               continue
+                       }
+                       if w.Args[len(w.Args)-1] != mem {
+                               v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
+                       }
+                       switch w.Op {
+                       case ssa.OpMIPS64MOVBload, ssa.OpMIPS64MOVBUload, ssa.OpMIPS64MOVHload, ssa.OpMIPS64MOVHUload,
+                               ssa.OpMIPS64MOVWload, ssa.OpMIPS64MOVWUload, ssa.OpMIPS64MOVVload,
+                               ssa.OpMIPS64MOVFload, ssa.OpMIPS64MOVDload,
+                               ssa.OpMIPS64MOVBstore, ssa.OpMIPS64MOVHstore, ssa.OpMIPS64MOVWstore, ssa.OpMIPS64MOVVstore,
+                               ssa.OpMIPS64MOVFstore, ssa.OpMIPS64MOVDstore:
+                               // arg0 is ptr, auxint is offset
+                               if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
+                                       if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+                                               gc.Warnl(v.Line, "removed nil check")
+                                       }
+                                       return
+                               }
+                       case ssa.OpMIPS64DUFFZERO, ssa.OpMIPS64LoweredZero:
+                               // arg0 is ptr
+                               if w.Args[0] == v.Args[0] {
+                                       if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+                                               gc.Warnl(v.Line, "removed nil check")
+                                       }
+                                       return
+                               }
+                       case ssa.OpMIPS64LoweredMove:
+                               // arg0 is dst ptr, arg1 is src ptr
+                               if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
+                                       if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+                                               gc.Warnl(v.Line, "removed nil check")
+                                       }
+                                       return
+                               }
+                       default:
+                       }
+                       if w.Type.IsMemory() {
+                               if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
+                                       // these ops are OK
+                                       mem = w
+                                       continue
+                               }
+                               // We can't delay the nil check past the next store.
+                               break
+                       }
+               }
                // Issue a load which will fault if arg is nil.
                p := gc.Prog(mips.AMOVB)
                p.From.Type = obj.TYPE_MEM
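For illustration, a hedged Go sketch of the pattern this removes (the type and function are hypothetical, not from the CL):

package p

type T struct{ x int64 }

// deref: the MOVV load of p.x uses the same pointer and memory state as
// the nil check, at offset 0 (below minZeroPage), so it faults on nil
// exactly as the check's probe load would and the check can be dropped.
func deref(p *T) int64 {
	return p.x
}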
src/cmd/compile/internal/ssa/gen/MIPS64.rules
index c342f76a2c616d2fede3f92b62b79019e0a1b2a3..a53bd2169edb727c98db633b56e50d416aacc495 100644 (file)
 // Absorb boolean tests into block
 (NE (FPFlagTrue cmp) yes no) -> (FPT cmp yes no)
 (NE (FPFlagFalse cmp) yes no) -> (FPF cmp yes no)
+(EQ (FPFlagTrue cmp) yes no) -> (FPF cmp yes no)
+(EQ (FPFlagFalse cmp) yes no) -> (FPT cmp yes no)
+(NE (XORconst [1] cmp:(SGT _ _)) yes no) -> (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTU _ _)) yes no) -> (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTconst _)) yes no) -> (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUconst _)) yes no) -> (EQ cmp yes no)
+(EQ (XORconst [1] cmp:(SGT _ _)) yes no) -> (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) -> (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTconst _)) yes no) -> (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) -> (NE cmp yes no)
+(NE (SGTUconst [1] x) yes no) -> (EQ x yes no)
+(EQ (SGTUconst [1] x) yes no) -> (NE x yes no)
+(NE (SGTU x (MOVVconst [0])) yes no) -> (NE x yes no)
+(EQ (SGTU x (MOVVconst [0])) yes no) -> (EQ x yes no)
+(NE (SGTconst [0] x) yes no) -> (LTZ x yes no)
+(EQ (SGTconst [0] x) yes no) -> (GEZ x yes no)
+(NE (SGT x (MOVVconst [0])) yes no) -> (GTZ x yes no)
+(EQ (SGT x (MOVVconst [0])) yes no) -> (LEZ x yes no)
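As a hypothetical illustration of the inverted-comparison rules: Go lowers x <= y on MIPS64 to the negation (XOR with 1) of SGT x y, and the rules above absorb the XOR by flipping the branch sense.

package p

// leq: the condition lowers to (XORconst [1] (SGT x y)); branching on
// NE of that value becomes branching on EQ of the SGT result directly.
func leq(x, y int64) int64 {
	if x <= y {
		return 1
	}
	return 0
}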
+
+// fold offset into address
+(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) -> (MOVVaddr [off1+off2] {sym} ptr)
+
+// fold address into load/store
+(MOVBload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload  [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBUload [off1+off2] {sym} ptr mem)
+(MOVHload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHload  [off1+off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHUload [off1+off2] {sym} ptr mem)
+(MOVWload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload  [off1+off2] {sym} ptr mem)
+(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWUload [off1+off2] {sym} ptr mem)
+(MOVVload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVVload  [off1+off2] {sym} ptr mem)
+(MOVFload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVFload  [off1+off2] {sym} ptr mem)
+(MOVDload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDload  [off1+off2] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVVstore [off1+off2] {sym} ptr val mem)
+(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVFstore [off1+off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} ptr val mem)
+(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWstorezero [off1+off2] {sym} ptr mem)
+(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVVstorezero [off1+off2] {sym} ptr mem)
+
+(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVVload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVVstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+       (MOVVstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
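For illustration, a hypothetical Go sketch of where these rules apply: a field access computes its address as a constant offset from the base pointer, which folds into the load's offset field.

package p

type S struct{ a, b int64 }

// field: &p.b is formed as (ADDVconst [8] p); the rules above fold the
// +8 into the MOVVload so no separate ADDV instruction is emitted.
func field(p *S) int64 {
	return p.b
}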
+
+// store zero
+(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
+(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) -> (MOVVstorezero [off] {sym} ptr mem)
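A hypothetical sketch of the store-zero rules:

package p

// setZero: storing the constant 0 becomes MOVVstorezero, which stores
// from R0 (the hardwired zero register) instead of materializing 0 in a
// register first.
func setZero(p *int64) {
	*p = 0
}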
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) -> (MOVVreg x)
+(MOVBUreg x:(MOVBUload _ _)) -> (MOVVreg x)
+(MOVHreg x:(MOVBload _ _)) -> (MOVVreg x)
+(MOVHreg x:(MOVBUload _ _)) -> (MOVVreg x)
+(MOVHreg x:(MOVHload _ _)) -> (MOVVreg x)
+(MOVHUreg x:(MOVBUload _ _)) -> (MOVVreg x)
+(MOVHUreg x:(MOVHUload _ _)) -> (MOVVreg x)
+(MOVWreg x:(MOVBload _ _)) -> (MOVVreg x)
+(MOVWreg x:(MOVBUload _ _)) -> (MOVVreg x)
+(MOVWreg x:(MOVHload _ _)) -> (MOVVreg x)
+(MOVWreg x:(MOVHUload _ _)) -> (MOVVreg x)
+(MOVWreg x:(MOVWload _ _)) -> (MOVVreg x)
+(MOVWUreg x:(MOVBUload _ _)) -> (MOVVreg x)
+(MOVWUreg x:(MOVHUload _ _)) -> (MOVVreg x)
+(MOVWUreg x:(MOVWUload _ _)) -> (MOVVreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) -> (MOVVreg x)
+(MOVBUreg x:(MOVBUreg _)) -> (MOVVreg x)
+(MOVHreg x:(MOVBreg _)) -> (MOVVreg x)
+(MOVHreg x:(MOVBUreg _)) -> (MOVVreg x)
+(MOVHreg x:(MOVHreg _)) -> (MOVVreg x)
+(MOVHUreg x:(MOVBUreg _)) -> (MOVVreg x)
+(MOVHUreg x:(MOVHUreg _)) -> (MOVVreg x)
+(MOVWreg x:(MOVBreg _)) -> (MOVVreg x)
+(MOVWreg x:(MOVBUreg _)) -> (MOVVreg x)
+(MOVWreg x:(MOVHreg _)) -> (MOVVreg x)
+(MOVWreg x:(MOVHUreg _)) -> (MOVVreg x)
+(MOVWreg x:(MOVWreg _)) -> (MOVVreg x)
+(MOVWUreg x:(MOVBUreg _)) -> (MOVVreg x)
+(MOVWUreg x:(MOVHUreg _)) -> (MOVVreg x)
+(MOVWUreg x:(MOVWUreg _)) -> (MOVVreg x)
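A hypothetical sketch of the double-extension rules (earlier generic rules may also catch simple cases like this one):

package p

// reextend: int16(x) lowers to MOVBreg, and the int32 conversion then
// requests MOVHreg of that result; the value is already sign-extended
// past 16 bits, so the second extension reduces to a register move.
func reextend(x int8) int32 {
	y := int16(x)
	return int32(y)
}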
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
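A hedged sketch of the store-narrowing rules (names hypothetical; generic truncation rules may fold simple cases before these fire):

package p

// storeRoundTrip: widening x to int64 emits a MOVBreg, and truncating
// back to int8 is a no-op on MIPS64, so the store's operand is that
// MOVBreg; since MOVBstore writes only the low byte, the extension is
// dropped and x is stored directly.
func storeRoundTrip(p *int8, x int8) {
	*p = int8(int64(x))
}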
+
+// if a register move has only one use, just use the same register without emitting an instruction
+// MOVVnop doesn't emit an instruction; it exists only to carry the type.
+(MOVVreg x) && x.Uses == 1 -> (MOVVnop x)
+
+// fold constants into arithmetic ops
+(ADDV (MOVVconst [c]) x) && is32Bit(c) -> (ADDVconst [c] x)
+(ADDV x (MOVVconst [c])) && is32Bit(c) -> (ADDVconst [c] x)
+(SUBV x (MOVVconst [c])) && is32Bit(c) -> (SUBVconst [c] x)
+(AND (MOVVconst [c]) x) && is32Bit(c) -> (ANDconst [c] x)
+(AND x (MOVVconst [c])) && is32Bit(c) -> (ANDconst [c] x)
+(OR  (MOVVconst [c]) x) && is32Bit(c) -> (ORconst  [c] x)
+(OR  x (MOVVconst [c])) && is32Bit(c) -> (ORconst  [c] x)
+(XOR (MOVVconst [c]) x) && is32Bit(c) -> (XORconst [c] x)
+(XOR x (MOVVconst [c])) && is32Bit(c) -> (XORconst [c] x)
+(NOR (MOVVconst [c]) x) && is32Bit(c) -> (NORconst [c] x)
+(NOR x (MOVVconst [c])) && is32Bit(c) -> (NORconst [c] x)
+
+(SLLV _ (MOVVconst [c])) && uint64(c)>=64 -> (MOVVconst [0])
+(SRLV _ (MOVVconst [c])) && uint64(c)>=64 -> (MOVVconst [0])
+(SRAV x (MOVVconst [c])) && uint64(c)>=64 -> (SRAVconst x [63])
+(SLLV x (MOVVconst [c])) -> (SLLVconst x [c])
+(SRLV x (MOVVconst [c])) -> (SRLVconst x [c])
+(SRAV x (MOVVconst [c])) -> (SRAVconst x [c])
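A hypothetical sketch of the constant-shift rules:

package p

// shiftConst: the generic 64-bit shift lowering guards against counts
// >= 64, but with a constant count the SGTU and SLLV rules above fold
// the guard away, leaving a single SLLVconst [3].
func shiftConst(x uint64) uint64 {
	return x << 3
}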
+
+(SGT  (MOVVconst [c]) x) && is32Bit(c) -> (SGTconst  [c] x)
+(SGTU (MOVVconst [c]) x) && is32Bit(c) -> (SGTUconst [c] x)
+
+// mul by constant
+(Select1 (MULVU x (MOVVconst [-1]))) -> (NEGV x)
+(Select1 (MULVU _ (MOVVconst [0]))) -> (MOVVconst [0])
+(Select1 (MULVU x (MOVVconst [1]))) -> x
+(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo(c) -> (SLLVconst [log2(c)] x)
+
+(Select1 (MULVU (MOVVconst [-1]) x)) -> (NEGV x)
+(Select1 (MULVU (MOVVconst [0]) _)) -> (MOVVconst [0])
+(Select1 (MULVU (MOVVconst [1]) x)) -> x
+(Select1 (MULVU (MOVVconst [c]) x)) && isPowerOfTwo(c) -> (SLLVconst [log2(c)] x)
+
+// div by constant
+(Select1 (DIVVU x (MOVVconst [1]))) -> x
+(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) -> (SRLVconst [log2(c)] x)
+(Select0 (DIVVU _ (MOVVconst [1]))) -> (MOVVconst [0])                       // mod
+(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) -> (ANDconst [c-1] x) // mod
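A hypothetical sketch of the power-of-two strength reductions:

package p

// scale: multiply, divide, and modulo by 8 reduce to SLLVconst [3],
// SRLVconst [3], and ANDconst [7] via the rules above, avoiding the
// multi-cycle MULVU/DIVVU instructions.
func scale(x uint64) (uint64, uint64, uint64) {
	return x * 8, x / 8, x % 8
}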
+
+// generic simplifications
+(ADDV x (NEGV y)) -> (SUBV x y)
+(ADDV (NEGV y) x) -> (SUBV x y)
+(SUBV x x) -> (MOVVconst [0])
+(SUBV (MOVVconst [0]) x) -> (NEGV x)
+(AND x x) -> x
+(OR  x x) -> x
+(XOR x x) -> (MOVVconst [0])
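A hypothetical sketch of the generic simplifications (the generic SSA rules also cover some of these; the lowered forms act as a backstop):

package p

// simplify: x + -y becomes SUBV x y, and x ^ x folds to the constant 0.
func simplify(x, y int64) (int64, int64) {
	return x + -y, x ^ x
}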
+
+// remove redundant *const ops
+(ADDVconst [0]  x) -> x
+(SUBVconst [0]  x) -> x
+(ANDconst [0]  _) -> (MOVVconst [0])
+(ANDconst [-1] x) -> x
+(ORconst  [0]  x) -> x
+(ORconst  [-1] _) -> (MOVVconst [-1])
+(XORconst [0]  x) -> x
+(XORconst [-1] x) -> (NORconst [0] x)
+
+// generic constant folding
+(ADDVconst [c] (MOVVconst [d]))  -> (MOVVconst [c+d])
+(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) -> (ADDVconst [c+d] x)
+(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) -> (ADDVconst [c-d] x)
+(SUBVconst [c] (MOVVconst [d]))  -> (MOVVconst [d-c])
+(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) -> (ADDVconst [-c-d] x)
+(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) -> (ADDVconst [-c+d] x)
+(SLLVconst [c] (MOVVconst [d]))  -> (MOVVconst [int64(d)<<uint64(c)])
+(SRLVconst [c] (MOVVconst [d]))  -> (MOVVconst [int64(uint64(d)>>uint64(c))])
+(SRAVconst [c] (MOVVconst [d]))  -> (MOVVconst [int64(d)>>uint64(c)])
+(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [c*d])
+(Select1 (DIVV  (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [int64(c)/int64(d)])
+(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [int64(uint64(c)/uint64(d))])
+(Select0 (DIVV  (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [int64(c)%int64(d)])   // mod
+(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
+(ANDconst [c] (MOVVconst [d])) -> (MOVVconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
+(ORconst [c] (MOVVconst [d])) -> (MOVVconst [c|d])
+(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) -> (ORconst [c|d] x)
+(XORconst [c] (MOVVconst [d])) -> (MOVVconst [c^d])
+(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) -> (XORconst [c^d] x)
+(NORconst [c] (MOVVconst [d])) -> (MOVVconst [^(c|d)])
+(NEGV (MOVVconst [c])) -> (MOVVconst [-c])
+(MOVBreg  (MOVVconst [c])) -> (MOVVconst [int64(int8(c))])
+(MOVBUreg (MOVVconst [c])) -> (MOVVconst [int64(uint8(c))])
+(MOVHreg  (MOVVconst [c])) -> (MOVVconst [int64(int16(c))])
+(MOVHUreg (MOVVconst [c])) -> (MOVVconst [int64(uint16(c))])
+(MOVWreg  (MOVVconst [c])) -> (MOVVconst [int64(int32(c))])
+(MOVWUreg (MOVVconst [c])) -> (MOVVconst [int64(uint32(c))])
+(MOVVreg  (MOVVconst [c])) -> (MOVVconst [c])
+
+// constant comparisons
+(SGTconst [c] (MOVVconst [d])) && int64(c)>int64(d) -> (MOVVconst [1])
+(SGTconst [c] (MOVVconst [d])) && int64(c)<=int64(d) -> (MOVVconst [0])
+(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) -> (MOVVconst [1])
+(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) -> (MOVVconst [0])
+
+// other known comparisons
+(SGTconst [c] (MOVBreg _)) && 0x7f < int64(c) -> (MOVVconst [1])
+(SGTconst [c] (MOVBreg _)) && int64(c) <= -0x80 -> (MOVVconst [0])
+(SGTconst [c] (MOVBUreg _)) && 0xff < int64(c) -> (MOVVconst [1])
+(SGTconst [c] (MOVBUreg _)) && int64(c) < 0 -> (MOVVconst [0])
+(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) -> (MOVVconst [1])
+(SGTconst [c] (MOVHreg _)) && 0x7fff < int64(c) -> (MOVVconst [1])
+(SGTconst [c] (MOVHreg _)) && int64(c) <= -0x8000 -> (MOVVconst [0])
+(SGTconst [c] (MOVHUreg _)) && 0xffff < int64(c) -> (MOVVconst [1])
+(SGTconst [c] (MOVHUreg _)) && int64(c) < 0 -> (MOVVconst [0])
+(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) -> (MOVVconst [1])
+(SGTconst [c] (MOVWUreg _)) && int64(c) < 0 -> (MOVVconst [0])
+(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c -> (MOVVconst [1])
+(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) -> (MOVVconst [1])
+(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 1<<uint64(64-d) <= c -> (MOVVconst [1])
+(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 1<<uint64(64-d) <= uint64(c) -> (MOVVconst [1])
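A hypothetical sketch of the known-range comparison rules:

package p

// masked: x&7 is at most 7, so the unsigned comparison against 8 folds
// to the constant 1 through the ANDconst rule above, and no comparison
// is emitted at run time.
func masked(x uint64) bool {
	return x&7 < 8
}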
+
+// absorb constants into branches
+(EQ  (MOVVconst [0]) yes no) -> (First nil yes no)
+(EQ  (MOVVconst [c]) yes no) && c != 0 -> (First nil no yes)
+(NE  (MOVVconst [0]) yes no) -> (First nil no yes)
+(NE  (MOVVconst [c]) yes no) && c != 0 -> (First nil yes no)
+(LTZ (MOVVconst [c]) yes no) && c <  0 -> (First nil yes no)
+(LTZ (MOVVconst [c]) yes no) && c >= 0 -> (First nil no yes)
+(LEZ (MOVVconst [c]) yes no) && c <= 0 -> (First nil yes no)
+(LEZ (MOVVconst [c]) yes no) && c >  0 -> (First nil no yes)
+(GTZ (MOVVconst [c]) yes no) && c >  0 -> (First nil yes no)
+(GTZ (MOVVconst [c]) yes no) && c <= 0 -> (First nil no yes)
+(GEZ (MOVVconst [c]) yes no) && c >= 0 -> (First nil yes no)
+(GEZ (MOVVconst [c]) yes no) && c <  0 -> (First nil no yes)
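A hypothetical sketch of the constant-branch rules, continuing the masked example above: the comparison only becomes a MOVVconst after lowering, and the block rules can then rewrite the branch to First so the dead arm is deleted.

package p

// alwaysTaken: x&7 < 8 folds to MOVVconst [1] after lowering, so the
// NE block rule above turns the conditional block into First and the
// return 2 arm is removed as dead code.
func alwaysTaken(x uint64) int64 {
	if x&7 < 8 {
		return 1
	}
	return 2
}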
src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
index 1d52e394f4108da8ad4dbb1b1e8435b35879631f..0feced5c73e33c95a4453d209deb0e119f5c089b 100644 (file)
@@ -201,9 +201,9 @@ func init() {
 
                // comparisons
                {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"},                      // 1 if arg0 > arg1 (signed), 0 otherwise
-               {name: "SGTconst", argLength: 2, reg: gp21, asm: "SGT", aux: "Int64", typ: "Bool"},   // 1 if arg0 > auxInt (signed), 0 otherwise
+               {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int64", typ: "Bool"},   // 1 if auxInt > arg0 (signed), 0 otherwise
                {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"},                    // 1 if arg0 > arg1 (unsigned), 0 otherwise
-               {name: "SGTUconst", argLength: 2, reg: gp21, asm: "SGTU", aux: "Int64", typ: "Bool"}, // 1 if arg0 > auxInt (unsigned), 0 otherwise
+               {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise
 
                {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32
                {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64
src/cmd/compile/internal/ssa/opGen.go
index f8e2d8979f084cb2228f25b2473538ae961e5aa6..f5ebaf467f4e895142e0c70291ccf2e6db3721b7 100644 (file)
@@ -12421,12 +12421,11 @@ var opcodeTable = [...]opInfo{
        {
                name:    "SGTconst",
                auxType: auxInt64,
-               argLen:  2,
+               argLen:  1,
                asm:     mips.ASGT,
                reg: regInfo{
                        inputs: []inputInfo{
                                {0, 100663294}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g
-                               {1, 100663294}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g
                        },
                        outputs: []outputInfo{
                                {0, 33554430}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25
@@ -12450,12 +12449,11 @@ var opcodeTable = [...]opInfo{
        {
                name:    "SGTUconst",
                auxType: auxInt64,
-               argLen:  2,
+               argLen:  1,
                asm:     mips.ASGTU,
                reg: regInfo{
                        inputs: []inputInfo{
                                {0, 100663294}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g
-                               {1, 100663294}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g
                        },
                        outputs: []outputInfo{
                                {0, 33554430}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25
src/cmd/compile/internal/ssa/rewriteMIPS64.go
index 1c82c0c94940f87cf994274ca65c7c5636dddef1..2bda6a79aa59c64f694ed09e5d0e4b6da5813eb0 100644 (file)
@@ -264,6 +264,104 @@ func rewriteValueMIPS64(v *Value, config *Config) bool {
                return rewriteValueMIPS64_OpLsh8x64(v, config)
        case OpLsh8x8:
                return rewriteValueMIPS64_OpLsh8x8(v, config)
+       case OpMIPS64ADDV:
+               return rewriteValueMIPS64_OpMIPS64ADDV(v, config)
+       case OpMIPS64ADDVconst:
+               return rewriteValueMIPS64_OpMIPS64ADDVconst(v, config)
+       case OpMIPS64AND:
+               return rewriteValueMIPS64_OpMIPS64AND(v, config)
+       case OpMIPS64ANDconst:
+               return rewriteValueMIPS64_OpMIPS64ANDconst(v, config)
+       case OpMIPS64MOVBUload:
+               return rewriteValueMIPS64_OpMIPS64MOVBUload(v, config)
+       case OpMIPS64MOVBUreg:
+               return rewriteValueMIPS64_OpMIPS64MOVBUreg(v, config)
+       case OpMIPS64MOVBload:
+               return rewriteValueMIPS64_OpMIPS64MOVBload(v, config)
+       case OpMIPS64MOVBreg:
+               return rewriteValueMIPS64_OpMIPS64MOVBreg(v, config)
+       case OpMIPS64MOVBstore:
+               return rewriteValueMIPS64_OpMIPS64MOVBstore(v, config)
+       case OpMIPS64MOVBstorezero:
+               return rewriteValueMIPS64_OpMIPS64MOVBstorezero(v, config)
+       case OpMIPS64MOVDload:
+               return rewriteValueMIPS64_OpMIPS64MOVDload(v, config)
+       case OpMIPS64MOVDstore:
+               return rewriteValueMIPS64_OpMIPS64MOVDstore(v, config)
+       case OpMIPS64MOVFload:
+               return rewriteValueMIPS64_OpMIPS64MOVFload(v, config)
+       case OpMIPS64MOVFstore:
+               return rewriteValueMIPS64_OpMIPS64MOVFstore(v, config)
+       case OpMIPS64MOVHUload:
+               return rewriteValueMIPS64_OpMIPS64MOVHUload(v, config)
+       case OpMIPS64MOVHUreg:
+               return rewriteValueMIPS64_OpMIPS64MOVHUreg(v, config)
+       case OpMIPS64MOVHload:
+               return rewriteValueMIPS64_OpMIPS64MOVHload(v, config)
+       case OpMIPS64MOVHreg:
+               return rewriteValueMIPS64_OpMIPS64MOVHreg(v, config)
+       case OpMIPS64MOVHstore:
+               return rewriteValueMIPS64_OpMIPS64MOVHstore(v, config)
+       case OpMIPS64MOVHstorezero:
+               return rewriteValueMIPS64_OpMIPS64MOVHstorezero(v, config)
+       case OpMIPS64MOVVload:
+               return rewriteValueMIPS64_OpMIPS64MOVVload(v, config)
+       case OpMIPS64MOVVreg:
+               return rewriteValueMIPS64_OpMIPS64MOVVreg(v, config)
+       case OpMIPS64MOVVstore:
+               return rewriteValueMIPS64_OpMIPS64MOVVstore(v, config)
+       case OpMIPS64MOVVstorezero:
+               return rewriteValueMIPS64_OpMIPS64MOVVstorezero(v, config)
+       case OpMIPS64MOVWUload:
+               return rewriteValueMIPS64_OpMIPS64MOVWUload(v, config)
+       case OpMIPS64MOVWUreg:
+               return rewriteValueMIPS64_OpMIPS64MOVWUreg(v, config)
+       case OpMIPS64MOVWload:
+               return rewriteValueMIPS64_OpMIPS64MOVWload(v, config)
+       case OpMIPS64MOVWreg:
+               return rewriteValueMIPS64_OpMIPS64MOVWreg(v, config)
+       case OpMIPS64MOVWstore:
+               return rewriteValueMIPS64_OpMIPS64MOVWstore(v, config)
+       case OpMIPS64MOVWstorezero:
+               return rewriteValueMIPS64_OpMIPS64MOVWstorezero(v, config)
+       case OpMIPS64NEGV:
+               return rewriteValueMIPS64_OpMIPS64NEGV(v, config)
+       case OpMIPS64NOR:
+               return rewriteValueMIPS64_OpMIPS64NOR(v, config)
+       case OpMIPS64NORconst:
+               return rewriteValueMIPS64_OpMIPS64NORconst(v, config)
+       case OpMIPS64OR:
+               return rewriteValueMIPS64_OpMIPS64OR(v, config)
+       case OpMIPS64ORconst:
+               return rewriteValueMIPS64_OpMIPS64ORconst(v, config)
+       case OpMIPS64SGT:
+               return rewriteValueMIPS64_OpMIPS64SGT(v, config)
+       case OpMIPS64SGTU:
+               return rewriteValueMIPS64_OpMIPS64SGTU(v, config)
+       case OpMIPS64SGTUconst:
+               return rewriteValueMIPS64_OpMIPS64SGTUconst(v, config)
+       case OpMIPS64SGTconst:
+               return rewriteValueMIPS64_OpMIPS64SGTconst(v, config)
+       case OpMIPS64SLLV:
+               return rewriteValueMIPS64_OpMIPS64SLLV(v, config)
+       case OpMIPS64SLLVconst:
+               return rewriteValueMIPS64_OpMIPS64SLLVconst(v, config)
+       case OpMIPS64SRAV:
+               return rewriteValueMIPS64_OpMIPS64SRAV(v, config)
+       case OpMIPS64SRAVconst:
+               return rewriteValueMIPS64_OpMIPS64SRAVconst(v, config)
+       case OpMIPS64SRLV:
+               return rewriteValueMIPS64_OpMIPS64SRLV(v, config)
+       case OpMIPS64SRLVconst:
+               return rewriteValueMIPS64_OpMIPS64SRLVconst(v, config)
+       case OpMIPS64SUBV:
+               return rewriteValueMIPS64_OpMIPS64SUBV(v, config)
+       case OpMIPS64SUBVconst:
+               return rewriteValueMIPS64_OpMIPS64SUBVconst(v, config)
+       case OpMIPS64XOR:
+               return rewriteValueMIPS64_OpMIPS64XOR(v, config)
+       case OpMIPS64XORconst:
+               return rewriteValueMIPS64_OpMIPS64XORconst(v, config)
        case OpMod16:
                return rewriteValueMIPS64_OpMod16(v, config)
        case OpMod16u:
@@ -402,6 +500,10 @@ func rewriteValueMIPS64(v *Value, config *Config) bool {
                return rewriteValueMIPS64_OpRsh8x64(v, config)
        case OpRsh8x8:
                return rewriteValueMIPS64_OpRsh8x8(v, config)
+       case OpSelect0:
+               return rewriteValueMIPS64_OpSelect0(v, config)
+       case OpSelect1:
+               return rewriteValueMIPS64_OpSelect1(v, config)
        case OpSignExt16to32:
                return rewriteValueMIPS64_OpSignExt16to32(v, config)
        case OpSignExt16to64:
@@ -3082,230 +3184,3319 @@ func rewriteValueMIPS64_OpLsh8x8(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueMIPS64_OpMod16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMIPS64ADDV(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod16 x y)
-       // cond:
-       // result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+       // match: (ADDV (MOVVconst [c]) x)
+       // cond: is32Bit(c)
+       // result: (ADDVconst [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64ADDVconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (ADDV x (MOVVconst [c]))
+       // cond: is32Bit(c)
+       // result: (ADDVconst [c] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-               v1 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64ADDVconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueMIPS64_OpMod16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod16u x y)
+       // match: (ADDV x (NEGV y))
        // cond:
-       // result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+       // result: (SUBV x y)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64NEGV {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpMIPS64SUBV)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValueMIPS64_OpMod32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod32 x y)
+       // match: (ADDV (NEGV y) x)
        // cond:
-       // result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+       // result: (SUBV x y)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-               v1 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64NEGV {
+                       break
+               }
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpMIPS64SUBV)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValueMIPS64_OpMod32u(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod32u x y)
+       // match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr))
        // cond:
-       // result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+       // result: (MOVVaddr [off1+off2] {sym} ptr)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym := v_0.Aux
+               ptr := v_0.Args[0]
+               v.reset(OpMIPS64MOVVaddr)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
                return true
        }
-}
-func rewriteValueMIPS64_OpMod64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod64 x y)
+       // match: (ADDVconst [0]  x)
        // cond:
-       // result: (Select0 (DIVV x y))
+       // result: x
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueMIPS64_OpMod64u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod64u x y)
+       // match: (ADDVconst [c] (MOVVconst [d]))
        // cond:
-       // result: (Select0 (DIVVU x y))
+       // result: (MOVVconst [c+d])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = c + d
                return true
        }
+       // match: (ADDVconst [c] (ADDVconst [d] x))
+       // cond: is32Bit(c+d)
+       // result: (ADDVconst [c+d] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(is32Bit(c + d)) {
+                       break
+               }
+               v.reset(OpMIPS64ADDVconst)
+               v.AuxInt = c + d
+               v.AddArg(x)
+               return true
+       }
+       // match: (ADDVconst [c] (SUBVconst [d] x))
+       // cond: is32Bit(c-d)
+       // result: (ADDVconst [c-d] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64SUBVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(is32Bit(c - d)) {
+                       break
+               }
+               v.reset(OpMIPS64ADDVconst)
+               v.AuxInt = c - d
+               v.AddArg(x)
+               return true
+       }
+       return false
 }
-func rewriteValueMIPS64_OpMod8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMIPS64AND(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod8 x y)
-       // cond:
-       // result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+       // match: (AND (MOVVconst [c]) x)
+       // cond: is32Bit(c)
+       // result: (ANDconst [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64ANDconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (AND x (MOVVconst [c]))
+       // cond: is32Bit(c)
+       // result: (ANDconst [c] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-               v1 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64ANDconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueMIPS64_OpMod8u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod8u x y)
+       // match: (AND x x)
        // cond:
-       // result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueMIPS64_OpMove(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Move [s] _ _ mem)
-       // cond: SizeAndAlign(s).Size() == 0
-       // result: mem
+       // match: (ANDconst [0]  _)
+       // cond:
+       // result: (MOVVconst [0])
        for {
-               s := v.AuxInt
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 0) {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = mem.Type
-               v.AddArg(mem)
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 1
-       // result: (MOVBstore dst (MOVBload src mem) mem)
+       // match: (ANDconst [-1] x)
+       // cond:
+       // result: x
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 1) {
+               if v.AuxInt != -1 {
                        break
                }
-               v.reset(OpMIPS64MOVBstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore dst (MOVHload src mem) mem)
+       // match: (ANDconst [c] (MOVVconst [d]))
+       // cond:
+       // result: (MOVVconst [c&d])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
                        break
                }
-               v.reset(OpMIPS64MOVHstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = c & d
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 2
-       // result: (MOVBstore [1] dst (MOVBload [1] src mem)            (MOVBstore dst (MOVBload src mem) mem))
+       // match: (ANDconst [c] (ANDconst [d] x))
+       // cond:
+       // result: (ANDconst [c&d] x)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ANDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpMIPS64ANDconst)
+               v.AuxInt = c & d
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVBUload [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVBUreg x:(MOVBUload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBUload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVBUreg x:(MOVBUreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBUreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVBUreg (MOVVconst [c]))
+       // cond:
+       // result: (MOVVconst [int64(uint8(c))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(uint8(c))
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVBload  [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVBload  [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVBreg x:(MOVBload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVBreg x:(MOVBreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVBreg  (MOVVconst [c]))
+       // cond:
+       // result: (MOVVconst [int64(int8(c))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(int8(c))
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem)
+       // cond:
+       // result: (MOVBstorezero [off] {sym} ptr mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_1.AuxInt != 0 {
+                       break
+               }
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVBstorezero)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+       // cond:
+       // result: (MOVBstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVBreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+       // cond:
+       // result: (MOVBstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVBUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+       // cond:
+       // result: (MOVBstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVHreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+       // cond:
+       // result: (MOVBstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVHUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+       // cond:
+       // result: (MOVBstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVWreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+       // cond:
+       // result: (MOVBstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVWUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
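+// Note on the MOVBstore rules above (illustrative): a byte store writes
+// only the low 8 bits of its value, so a MOV{B,H,W}{,U}reg extension
+// feeding it is dead and can be stripped, e.g. storing (MOVWreg x) with
+// x = 0x1234 writes 0x34 either way.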
+func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVBstorezero [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstorezero)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstorezero)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVDload  [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVDload  [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
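+// Sketch of the address-folding pattern shared by the load/store rules
+// in this file (illustrative): a constant add feeding the address folds
+// into the op's offset while the sum still passes is32Bit, e.g.
+//   (MOVDload [8] {sym} (ADDVconst [16] ptr) mem)
+//     -> (MOVDload [24] {sym} ptr mem)
+// and a MOVVaddr base folds in the same way when canMergeSym accepts
+// the pair (roughly, when at most one side carries a symbol).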
+func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVFload  [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVFload  [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVFload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVFload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVFstore [off1+off2] {sym} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVFstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVFstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVHUload [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVHUreg x:(MOVBUload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBUload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHUreg x:(MOVHUload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVHUload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHUreg x:(MOVBUreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBUreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHUreg x:(MOVHUreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVHUreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHUreg (MOVVconst [c]))
+       // cond:
+       // result: (MOVVconst [int64(uint16(c))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(uint16(c))
+               return true
+       }
+       return false
+}
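+// Note on the MOVHUreg rules above (illustrative): an unsigned load or
+// a narrower zero-extension already leaves the upper bits clear, so a
+// following MOVHUreg reduces to MOVVreg, a plain move. The constant
+// case folds outright: int64(uint16(c)) turns c = -1 into
+// (MOVVconst [0xffff]).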
+func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVHload  [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVHload  [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVHreg x:(MOVBload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg x:(MOVBUload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBUload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg x:(MOVHload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVHload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg x:(MOVBreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg x:(MOVBUreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBUreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg x:(MOVHreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVHreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg  (MOVVconst [c]))
+       // cond:
+       // result: (MOVVconst [int64(int16(c))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(int16(c))
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHstore [off] {sym} ptr (MOVVconst [0]) mem)
+       // cond:
+       // result: (MOVHstorezero [off] {sym} ptr mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_1.AuxInt != 0 {
+                       break
+               }
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVHstorezero)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+       // cond:
+       // result: (MOVHstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVHreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVHstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+       // cond:
+       // result: (MOVHstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVHUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVHstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+       // cond:
+       // result: (MOVHstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVWreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVHstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+       // cond:
+       // result: (MOVHstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVWUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVHstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVHstorezero [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstorezero)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstorezero)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVVload  [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVVload  [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVVload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVVreg x)
+       // cond: x.Uses == 1
+       // result: (MOVVnop x)
+       for {
+               x := v.Args[0]
+               if !(x.Uses == 1) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVnop)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVVreg  (MOVVconst [c]))
+       // cond:
+       // result: (MOVVconst [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
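+// Note on the MOVVreg rules above (illustrative): MOVVreg is a
+// 64-bit-to-64-bit move, so it carries no extension work. When its
+// argument has exactly one use it becomes MOVVnop, which emits no
+// instruction and lets the register allocator place both values in the
+// same register.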
+func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVVstore [off1+off2] {sym} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVVstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVVstore [off] {sym} ptr (MOVVconst [0]) mem)
+       // cond:
+       // result: (MOVVstorezero [off] {sym} ptr mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_1.AuxInt != 0 {
+                       break
+               }
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVVstorezero)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
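+// Note on the (MOVVstore ptr (MOVVconst [0]) mem) rule above
+// (illustrative): a store of literal zero becomes MOVVstorezero, so no
+// register needs to be loaded with the constant; on MIPS64 the source
+// can presumably be the hardwired zero register R0.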
+func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVVstorezero [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVstorezero)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVVstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVstorezero)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVWUload [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWUreg x:(MOVBUload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBUload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWUreg x:(MOVHUload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVHUload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWUreg x:(MOVWUload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVWUload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWUreg x:(MOVBUreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBUreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWUreg x:(MOVHUreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVHUreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWUreg x:(MOVWUreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVWUreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWUreg (MOVVconst [c]))
+       // cond:
+       // result: (MOVVconst [int64(uint32(c))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(uint32(c))
+               return true
+       }
+       return false
+}
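+// Worked example for the (MOVWUreg (MOVVconst [c])) rule above
+// (illustrative): int64(uint32(c)) truncates to 32 bits and
+// zero-extends, so c = -1 folds to (MOVVconst [0xffffffff]).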
+func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWload  [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVWload  [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWreg x:(MOVBload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWreg x:(MOVBUload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBUload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWreg x:(MOVHload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVHload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWreg x:(MOVHUload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVHUload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWreg x:(MOVWload _ _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVWload {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWreg x:(MOVBreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWreg x:(MOVBUreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVBUreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWreg x:(MOVHreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVHreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWreg x:(MOVHUreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVHUreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWreg x:(MOVWreg _))
+       // cond:
+       // result: (MOVVreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpMIPS64MOVWreg {
+                       break
+               }
+               v.reset(OpMIPS64MOVVreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWreg  (MOVVconst [c]))
+       // cond:
+       // result: (MOVVconst [int64(int32(c))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(int32(c))
+               return true
+       }
+       return false
+}
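+// Note on the MOVWreg rules above (illustrative): each listed source is
+// already sign- or zero-extended to at most 32 significant bits, so its
+// bit 31 already matches the upper bits and the 32-bit sign extension
+// collapses to MOVVreg. In particular (MOVWreg x:(MOVHUreg _)) is safe
+// because a zero-extended 16-bit value always has bit 31 clear.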
+func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off] {sym} ptr (MOVVconst [0]) mem)
+       // cond:
+       // result: (MOVWstorezero [off] {sym} ptr mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_1.AuxInt != 0 {
+                       break
+               }
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVWstorezero)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+       // cond:
+       // result: (MOVWstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVWreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVWstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+       // cond:
+       // result: (MOVWstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVWUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpMIPS64MOVWstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVWstorezero [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWstorezero)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+       // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVWstorezero)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64NEGV(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NEGV (MOVVconst [c]))
+       // cond:
+       // result: (MOVVconst [-c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = -c
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64NOR(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NOR (MOVVconst [c]) x)
+       // cond: is32Bit(c)
+       // result: (NORconst [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64NORconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (NOR x (MOVVconst [c]))
+       // cond: is32Bit(c)
+       // result: (NORconst [c] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64NORconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64NORconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NORconst [c] (MOVVconst [d]))
+       // cond:
+       // result: (MOVVconst [^(c|d)])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = ^(c | d)
+               return true
+       }
+       return false
+}
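+// Worked example for the (NORconst [c] (MOVVconst [d])) rule above
+// (illustrative): NOR computes ^(c|d), so (NORconst [0] (MOVVconst [0]))
+// folds to (MOVVconst [-1]); NOR against zero is the usual MIPS idiom
+// for bitwise NOT.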
+func rewriteValueMIPS64_OpMIPS64OR(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OR  (MOVVconst [c]) x)
+       // cond: is32Bit(c)
+       // result: (ORconst  [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64ORconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (OR  x (MOVVconst [c]))
+       // cond: is32Bit(c)
+       // result: (ORconst  [c] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64ORconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (OR  x x)
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64ORconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORconst  [0]  x)
+       // cond:
+       // result: x
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (ORconst  [-1] _)
+       // cond:
+       // result: (MOVVconst [-1])
+       for {
+               if v.AuxInt != -1 {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = -1
+               return true
+       }
+       // match: (ORconst [c] (MOVVconst [d]))
+       // cond:
+       // result: (MOVVconst [c|d])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = c | d
+               return true
+       }
+       // match: (ORconst [c] (ORconst [d] x))
+       // cond: is32Bit(c|d)
+       // result: (ORconst [c|d] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ORconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(is32Bit(c | d)) {
+                       break
+               }
+               v.reset(OpMIPS64ORconst)
+               v.AuxInt = c | d
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
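+// Note on the ORconst rules above (illustrative): [0] is the identity
+// and yields x unchanged, [-1] absorbs everything into
+// (MOVVconst [-1]), and two stacked ORconst ops merge into one as long
+// as the combined immediate c|d still passes is32Bit.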
+func rewriteValueMIPS64_OpMIPS64SGT(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SGT  (MOVVconst [c]) x)
+       // cond: is32Bit(c)
+       // result: (SGTconst  [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64SGTconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64SGTU(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SGTU (MOVVconst [c]) x)
+       // cond: is32Bit(c)
+       // result: (SGTUconst [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64SGTUconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64SGTUconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SGTUconst [c] (MOVVconst [d]))
+       // cond: uint64(c)>uint64(d)
+       // result: (MOVVconst [1])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               if !(uint64(c) > uint64(d)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SGTUconst [c] (MOVVconst [d]))
+       // cond: uint64(c)<=uint64(d)
+       // result: (MOVVconst [0])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               if !(uint64(c) <= uint64(d)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SGTUconst [c] (MOVBUreg _))
+       // cond: 0xff < uint64(c)
+       // result: (MOVVconst [1])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVBUreg {
+                       break
+               }
+               if !(0xff < uint64(c)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SGTUconst [c] (MOVHUreg _))
+       // cond: 0xffff < uint64(c)
+       // result: (MOVVconst [1])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVHUreg {
+                       break
+               }
+               if !(0xffff < uint64(c)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SGTUconst [c] (ANDconst [m] _))
+       // cond: uint64(m) < uint64(c)
+       // result: (MOVVconst [1])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ANDconst {
+                       break
+               }
+               m := v_0.AuxInt
+               if !(uint64(m) < uint64(c)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SGTUconst [c] (SRLVconst _ [d]))
+       // cond: 0 < d && d <= 63 && 1<<uint64(64-d) <= uint64(c)
+       // result: (MOVVconst [1])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64SRLVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               if !(0 < d && d <= 63 && 1<<uint64(64-d) <= uint64(c)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 1
+               return true
+       }
+       return false
+}
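+// Beyond direct constant folding, the SGTUconst rules lean on value-range
+// facts: a MOVBUreg result lies in [0, 0xff], a MOVHUreg result in
+// [0, 0xffff], an ANDconst [m] result in [0, m], and an SRLVconst [d] result
+// below 1<<(64-d), so a strictly larger unsigned constant always compares
+// greater. A rough worked example (hypothetical constant): SGTUconst [256]
+// of any MOVBUreg value folds to 1 because 0xff < 256.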
+func rewriteValueMIPS64_OpMIPS64SGTconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SGTconst [c] (MOVVconst [d]))
+       // cond: int64(c)>int64(d)
+       // result: (MOVVconst [1])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               if !(int64(c) > int64(d)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SGTconst [c] (MOVVconst [d]))
+       // cond: int64(c)<=int64(d)
+       // result: (MOVVconst [0])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               if !(int64(c) <= int64(d)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SGTconst [c] (MOVBreg _))
+       // cond: 0x7f < int64(c)
+       // result: (MOVVconst [1])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVBreg {
+                       break
+               }
+               if !(0x7f < int64(c)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SGTconst [c] (MOVBreg _))
+       // cond: int64(c) <= -0x80
+       // result: (MOVVconst [0])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVBreg {
+                       break
+               }
+               if !(int64(c) <= -0x80) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SGTconst [c] (MOVBUreg _))
+       // cond: 0xff < int64(c)
+       // result: (MOVVconst [1])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVBUreg {
+                       break
+               }
+               if !(0xff < int64(c)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SGTconst [c] (MOVBUreg _))
+       // cond: int64(c) < 0
+       // result: (MOVVconst [0])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVBUreg {
+                       break
+               }
+               if !(int64(c) < 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SGTconst [c] (MOVHreg _))
+       // cond: 0x7fff < int64(c)
+       // result: (MOVVconst [1])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVHreg {
+                       break
+               }
+               if !(0x7fff < int64(c)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SGTconst [c] (MOVHreg _))
+       // cond: int64(c) <= -0x8000
+       // result: (MOVVconst [0])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVHreg {
+                       break
+               }
+               if !(int64(c) <= -0x8000) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SGTconst [c] (MOVHUreg _))
+       // cond: 0xffff < int64(c)
+       // result: (MOVVconst [1])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVHUreg {
+                       break
+               }
+               if !(0xffff < int64(c)) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SGTconst [c] (MOVHUreg _))
+       // cond: int64(c) < 0
+       // result: (MOVVconst [0])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVHUreg {
+                       break
+               }
+               if !(int64(c) < 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SGTconst [c] (MOVWUreg _))
+       // cond: int64(c) < 0
+       // result: (MOVVconst [0])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVWUreg {
+                       break
+               }
+               if !(int64(c) < 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SGTconst [c] (ANDconst [m] _))
+       // cond: 0 <= m && m < c
+       // result: (MOVVconst [1])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ANDconst {
+                       break
+               }
+               m := v_0.AuxInt
+               if !(0 <= m && m < c) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SGTconst [c] (SRLVconst _ [d]))
+       // cond: 0 <= c && 0 < d && d <= 63 && 1<<uint64(64-d) <= c
+       // result: (MOVVconst [1])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64SRLVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               if !(0 <= c && 0 < d && d <= 63 && 1<<uint64(64-d) <= c) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 1
+               return true
+       }
+       return false
+}
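+// The SGTconst rules are the signed analogue: extensions bound their results
+// (MOVBreg in [-0x80, 0x7f], MOVHreg in [-0x8000, 0x7fff], and the unsigned
+// MOVBUreg/MOVHUreg/MOVWUreg forms are non-negative), so comparisons against
+// constants outside those ranges fold to 0 or 1. A rough worked example
+// (hypothetical constant): (SGTconst [-1] (MOVWUreg _)) folds to 0, since a
+// zero-extended operand is non-negative and a negative constant can never
+// exceed it.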
+func rewriteValueMIPS64_OpMIPS64SLLV(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SLLV _ (MOVVconst [c]))
+       // cond: uint64(c)>=64
+       // result: (MOVVconst [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 64) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SLLV x (MOVVconst [c]))
+       // cond:
+       // result: (SLLVconst x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpMIPS64SLLVconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
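+// Go defines x << c as 0 once c reaches the operand width, while the MIPS64
+// shift instruction consults only the low six bits of the count, so an
+// oversized constant count cannot be emitted as-is. The first rule above
+// materializes the defined result 0 directly; in-range counts become
+// SLLVconst. A rough sketch of the semantics being preserved (plain Go,
+// hypothetical values):
+//
+//	var x uint64 = 1
+//	_ = x << 64 // 0 in Go; DSLLV alone would shift by 64&63 = 0, leaving x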
+func rewriteValueMIPS64_OpMIPS64SLLVconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SLLVconst [c] (MOVVconst [d]))
+       // cond:
+       // result: (MOVVconst [int64(d)<<uint64(c)])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(d) << uint64(c)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64SRAV(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SRAV x (MOVVconst [c]))
+       // cond: uint64(c)>=64
+       // result: (SRAVconst x [63])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 64) {
+                       break
+               }
+               v.reset(OpMIPS64SRAVconst)
+               v.AuxInt = 63
+               v.AddArg(x)
+               return true
+       }
+       // match: (SRAV x (MOVVconst [c]))
+       // cond:
+       // result: (SRAVconst x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpMIPS64SRAVconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
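+// For arithmetic right shifts Go defines oversized counts to produce all
+// sign bits, so the first rule clamps the constant count to 63 instead of
+// producing 0: (SRAVconst x [63]) smears the sign bit across the whole
+// result, which matches x >> c for any c >= 64 on a signed x.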
+func rewriteValueMIPS64_OpMIPS64SRAVconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SRAVconst [c] (MOVVconst [d]))
+       // cond:
+       // result: (MOVVconst [int64(d)>>uint64(c)])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(d) >> uint64(c)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64SRLV(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SRLV _ (MOVVconst [c]))
+       // cond: uint64(c)>=64
+       // result: (MOVVconst [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 64) {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SRLV x (MOVVconst [c]))
+       // cond:
+       // result: (SRLVconst x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpMIPS64SRLVconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64SRLVconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SRLVconst [c] (MOVVconst [d]))
+       // cond:
+       // result: (MOVVconst [int64(uint64(d)>>uint64(c))])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(uint64(d) >> uint64(c))
+               return true
+       }
+       return false
+}
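+// SRLVconst folding goes through uint64 so the vacated bits fill with zeros
+// regardless of the sign of d. A rough worked example (hypothetical
+// constants): with d = -1 and c = 60, uint64(d) >> 60 is 0xf rather than
+// -1, which is what a sign-propagating shift would have produced.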
+func rewriteValueMIPS64_OpMIPS64SUBV(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBV x (MOVVconst [c]))
+       // cond: is32Bit(c)
+       // result: (SUBVconst [c] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64SUBVconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (SUBV x x)
+       // cond:
+       // result: (MOVVconst [0])
+       for {
+               x := v.Args[0]
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SUBV (MOVVconst [0]) x)
+       // cond:
+       // result: (NEGV x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_0.AuxInt != 0 {
+                       break
+               }
+               x := v.Args[1]
+               v.reset(OpMIPS64NEGV)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
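+// The SUBV rules apply the usual algebraic identities: a constant second
+// operand becomes SUBVconst, x-x folds to 0, and 0-x becomes NEGV. Note
+// that only the second operand is folded; subtraction is not commutative,
+// so (SUBV (MOVVconst [c]) x) with nonzero c is deliberately left alone.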
+func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBVconst [0]  x)
+       // cond:
+       // result: x
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (SUBVconst [c] (MOVVconst [d]))
+       // cond:
+       // result: (MOVVconst [d-c])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = d - c
+               return true
+       }
+       // match: (SUBVconst [c] (SUBVconst [d] x))
+       // cond: is32Bit(-c-d)
+       // result: (ADDVconst [-c-d] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64SUBVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(is32Bit(-c - d)) {
+                       break
+               }
+               v.reset(OpMIPS64ADDVconst)
+               v.AuxInt = -c - d
+               v.AddArg(x)
+               return true
+       }
+       // match: (SUBVconst [c] (ADDVconst [d] x))
+       // cond: is32Bit(-c+d)
+       // result: (ADDVconst [-c+d] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64ADDVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(is32Bit(-c + d)) {
+                       break
+               }
+               v.reset(OpMIPS64ADDVconst)
+               v.AuxInt = -c + d
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
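+// Reassociation rewrites nested constant subtractions into ADDVconst with a
+// negated constant: (SUBVconst [c] (SUBVconst [d] x)) is x-d-c = x+(-c-d),
+// provided the combined constant still satisfies is32Bit and thus stays
+// encodable. A rough worked example (hypothetical constants): c = 3, d = 4
+// folds to (ADDVconst [-7] x).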
+func rewriteValueMIPS64_OpMIPS64XOR(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (XOR (MOVVconst [c]) x)
+       // cond: is32Bit(c)
+       // result: (XORconst [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64XORconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (XOR x (MOVVconst [c]))
+       // cond: is32Bit(c)
+       // result: (XORconst [c] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpMIPS64XORconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (XOR x x)
+       // cond:
+       // result: (MOVVconst [0])
+       for {
+               x := v.Args[0]
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       return false
+}
+func rewriteValueMIPS64_OpMIPS64XORconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (XORconst [0]  x)
+       // cond:
+       // result: x
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (XORconst [-1] x)
+       // cond:
+       // result: (NORconst [0] x)
+       for {
+               if v.AuxInt != -1 {
+                       break
+               }
+               x := v.Args[0]
+               v.reset(OpMIPS64NORconst)
+               v.AuxInt = 0
+               v.AddArg(x)
+               return true
+       }
+       // match: (XORconst [c] (MOVVconst [d]))
+       // cond:
+       // result: (MOVVconst [c^d])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = c ^ d
+               return true
+       }
+       // match: (XORconst [c] (XORconst [d] x))
+       // cond: is32Bit(c^d)
+       // result: (XORconst [c^d] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64XORconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(is32Bit(c ^ d)) {
+                       break
+               }
+               v.reset(OpMIPS64XORconst)
+               v.AuxInt = c ^ d
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
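+// (XORconst [-1] x) is bitwise complement, and MIPS64 has no dedicated NOT
+// instruction; NOR with zero computes ^(x|0) = ^x, so the rule lowers the
+// complement to (NORconst [0] x) instead of materializing a -1 operand.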
+func rewriteValueMIPS64_OpMod16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16 x y)
+       // cond:
+       // result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v1 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod16u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16u x y)
+       // cond:
+       // result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32 x y)
+       // cond:
+       // result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v1 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32u x y)
+       // cond:
+       // result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod64 x y)
+       // cond:
+       // result: (Select0 (DIVV x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod64u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod64u x y)
+       // cond:
+       // result: (Select0 (DIVVU x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8 x y)
+       // cond:
+       // result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v1 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpMod8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8u x y)
+       // cond:
+       // result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
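+// All of the Mod* lowerings share one shape: DIVV/DIVVU yield a
+// (remainder, quotient) tuple, mirroring the HI/LO register pair of the
+// hardware divide, and Select0 extracts the remainder. Narrow operands are
+// widened first with the matching sign or zero extension so the 64-bit
+// divide observes the intended values, as the match comments above record.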
+func rewriteValueMIPS64_OpMove(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Move [s] _ _ mem)
+       // cond: SizeAndAlign(s).Size() == 0
+       // result: mem
+       for {
+               s := v.AuxInt
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 0) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = mem.Type
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 1
+       // result: (MOVBstore dst (MOVBload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 1) {
+                       break
+               }
+               v.reset(OpMIPS64MOVBstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore dst (MOVHload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpMIPS64MOVHstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2
+       // result: (MOVBstore [1] dst (MOVBload [1] src mem)            (MOVBstore dst (MOVBload src mem) mem))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
                mem := v.Args[2]
                if !(SizeAndAlign(s).Size() == 2) {
                        break
@@ -5112,66 +8303,434 @@ func rewriteValueMIPS64_OpRsh8x32(v *Value, config *Config) bool {
                v.AddArg(v1)
                return true
        }
-}
-func rewriteValueMIPS64_OpRsh8x64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh8x64 <t> x y)
+}
+func rewriteValueMIPS64_OpRsh8x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x64 <t> x y)
+       // cond:
+       // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v3.AddArg(y)
+               v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v4.AuxInt = 63
+               v3.AddArg(v4)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueMIPS64_OpRsh8x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x8  <t> x y)
+       // cond:
+       // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpMIPS64SRAV)
+               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
+               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
+               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
+               v5.AuxInt = 63
+               v3.AddArg(v5)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v6.AddArg(y)
+               v1.AddArg(v6)
+               v.AddArg(v1)
+               return true
+       }
+}
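+// The signed right-shift lowerings encode Go's saturating shift semantics
+// without branches: (SGTU y 63) is 1 exactly when the count is out of
+// range, NEGV turns that into an all-ones mask, and OR-ing the mask into
+// the count yields all ones, which the hardware shift reads as a count of
+// 63 since only the low six bits are consulted; shifting by 63 gives the
+// all-sign-bits result Go requires.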
+func rewriteValueMIPS64_OpSelect0(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Select0 (DIVVU _ (MOVVconst [1])))
+       // cond:
+       // result: (MOVVconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64DIVVU {
+                       break
+               }
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_0_1.AuxInt != 1 {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Select0 (DIVVU x (MOVVconst [c])))
+       // cond: isPowerOfTwo(c)
+       // result: (ANDconst [c-1] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64DIVVU {
+                       break
+               }
+               x := v_0.Args[0]
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0_1.AuxInt
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpMIPS64ANDconst)
+               v.AuxInt = c - 1
+               v.AddArg(x)
+               return true
+       }
+       // match: (Select0 (DIVV  (MOVVconst [c]) (MOVVconst [d])))
+       // cond:
+       // result: (MOVVconst [int64(c)%int64(d)])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64DIVV {
+                       break
+               }
+               v_0_0 := v_0.Args[0]
+               if v_0_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0_0.AuxInt
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0_1.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(c) % int64(d)
+               return true
+       }
+       // match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d])))
+       // cond:
+       // result: (MOVVconst [int64(uint64(c)%uint64(d))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64DIVVU {
+                       break
+               }
+               v_0_0 := v_0.Args[0]
+               if v_0_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0_0.AuxInt
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0_1.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(uint64(c) % uint64(d))
+               return true
+       }
+       return false
+}
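+// Select0 of a division tuple is the remainder, so unsigned modulus by a
+// power of two reduces to masking: x % (1<<k) == x & ((1<<k)-1) for
+// unsigned x. A rough worked example (hypothetical constant): (Select0
+// (DIVVU x (MOVVconst [8]))) becomes (ANDconst [7] x), and modulus by 1
+// folds to the constant 0.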
+func rewriteValueMIPS64_OpSelect1(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Select1 (MULVU x (MOVVconst [-1])))
+       // cond:
+       // result: (NEGV x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MULVU {
+                       break
+               }
+               x := v_0.Args[0]
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_0_1.AuxInt != -1 {
+                       break
+               }
+               v.reset(OpMIPS64NEGV)
+               v.AddArg(x)
+               return true
+       }
+       // match: (Select1 (MULVU _ (MOVVconst [0])))
+       // cond:
+       // result: (MOVVconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MULVU {
+                       break
+               }
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_0_1.AuxInt != 0 {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Select1 (MULVU x (MOVVconst [1])))
+       // cond:
+       // result: x
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MULVU {
+                       break
+               }
+               x := v_0.Args[0]
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_0_1.AuxInt != 1 {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (Select1 (MULVU x (MOVVconst [c])))
+       // cond: isPowerOfTwo(c)
+       // result: (SLLVconst [log2(c)] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MULVU {
+                       break
+               }
+               x := v_0.Args[0]
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0_1.AuxInt
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpMIPS64SLLVconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
+               return true
+       }
+       // match: (Select1 (MULVU (MOVVconst [-1]) x))
+       // cond:
+       // result: (NEGV x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MULVU {
+                       break
+               }
+               v_0_0 := v_0.Args[0]
+               if v_0_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_0_0.AuxInt != -1 {
+                       break
+               }
+               x := v_0.Args[1]
+               v.reset(OpMIPS64NEGV)
+               v.AddArg(x)
+               return true
+       }
+       // match: (Select1 (MULVU (MOVVconst [0]) _))
+       // cond:
+       // result: (MOVVconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MULVU {
+                       break
+               }
+               v_0_0 := v_0.Args[0]
+               if v_0_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_0_0.AuxInt != 0 {
+                       break
+               }
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Select1 (MULVU (MOVVconst [1]) x))
+       // cond:
+       // result: x
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MULVU {
+                       break
+               }
+               v_0_0 := v_0.Args[0]
+               if v_0_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_0_0.AuxInt != 1 {
+                       break
+               }
+               x := v_0.Args[1]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (Select1 (MULVU (MOVVconst [c]) x))
+       // cond: isPowerOfTwo(c)
+       // result: (SLLVconst [log2(c)] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MULVU {
+                       break
+               }
+               v_0_0 := v_0.Args[0]
+               if v_0_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0_0.AuxInt
+               x := v_0.Args[1]
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpMIPS64SLLVconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
+               return true
+       }
+       // match: (Select1 (DIVVU x (MOVVconst [1])))
+       // cond:
+       // result: x
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64DIVVU {
+                       break
+               }
+               x := v_0.Args[0]
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               if v_0_1.AuxInt != 1 {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (Select1 (DIVVU x (MOVVconst [c])))
+       // cond: isPowerOfTwo(c)
+       // result: (SRLVconst [log2(c)] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64DIVVU {
+                       break
+               }
+               x := v_0.Args[0]
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0_1.AuxInt
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpMIPS64SRLVconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
+               return true
+       }
+       // match: (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d])))
        // cond:
-       // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
+       // result: (MOVVconst [c*d])
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpMIPS64SRAV)
-               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-               v3.AddArg(y)
-               v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-               v4.AuxInt = 63
-               v3.AddArg(v4)
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64MULVU {
+                       break
+               }
+               v_0_0 := v_0.Args[0]
+               if v_0_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0_0.AuxInt
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0_1.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = c * d
                return true
        }
-}
-func rewriteValueMIPS64_OpRsh8x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh8x8  <t> x y)
+       // match: (Select1 (DIVV  (MOVVconst [c]) (MOVVconst [d])))
        // cond:
-       // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
+       // result: (MOVVconst [int64(c)/int64(d)])
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpMIPS64SRAV)
-               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-               v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-               v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-               v5.AuxInt = 63
-               v3.AddArg(v5)
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v6.AddArg(y)
-               v1.AddArg(v6)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64DIVV {
+                       break
+               }
+               v_0_0 := v_0.Args[0]
+               if v_0_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0_0.AuxInt
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0_1.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(c) / int64(d)
+               return true
+       }
+       // match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d])))
+       // cond:
+       // result: (MOVVconst [int64(uint64(c)/uint64(d))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpMIPS64DIVVU {
+                       break
+               }
+               v_0_0 := v_0.Args[0]
+               if v_0_0.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               c := v_0_0.AuxInt
+               v_0_1 := v_0.Args[1]
+               if v_0_1.Op != OpMIPS64MOVVconst {
+                       break
+               }
+               d := v_0_1.AuxInt
+               v.reset(OpMIPS64MOVVconst)
+               v.AuxInt = int64(uint64(c) / uint64(d))
                return true
        }
+       return false
 }
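+// Select1 is the other half of the tuple: the low 64 bits of a MULVU
+// product, or the quotient of a DIVV/DIVVU. The rules above are the usual
+// strength reductions, applied to both operand orders of the commutative
+// MULVU: multiplying by -1, 0, or 1 becomes NEGV, the constant 0, or the
+// operand itself, a power-of-two multiplier becomes (SLLVconst [log2(c)] x),
+// and an unsigned power-of-two divisor becomes (SRLVconst [log2(c)] x).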
 func rewriteValueMIPS64_OpSignExt16to32(v *Value, config *Config) bool {
        b := v.Block
@@ -6153,6 +9712,345 @@ func rewriteValueMIPS64_OpZeroExt8to64(v *Value, config *Config) bool {
 }
 func rewriteBlockMIPS64(b *Block) bool {
        switch b.Kind {
+       case BlockMIPS64EQ:
+               // match: (EQ (FPFlagTrue cmp) yes no)
+               // cond:
+               // result: (FPF cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64FPFlagTrue {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64FPF
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (EQ (FPFlagFalse cmp) yes no)
+               // cond:
+               // result: (FPT cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64FPFlagFalse {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64FPT
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
+               // cond:
+               // result: (NE cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64XORconst {
+                               break
+                       }
+                       if v.AuxInt != 1 {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       if cmp.Op != OpMIPS64SGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64NE
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
+               // cond:
+               // result: (NE cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64XORconst {
+                               break
+                       }
+                       if v.AuxInt != 1 {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       if cmp.Op != OpMIPS64SGTU {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64NE
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
+               // cond:
+               // result: (NE cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64XORconst {
+                               break
+                       }
+                       if v.AuxInt != 1 {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       if cmp.Op != OpMIPS64SGTconst {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64NE
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
+               // cond:
+               // result: (NE cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64XORconst {
+                               break
+                       }
+                       if v.AuxInt != 1 {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       if cmp.Op != OpMIPS64SGTUconst {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64NE
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (EQ (SGTUconst [1] x) yes no)
+               // cond:
+               // result: (NE x yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64SGTUconst {
+                               break
+                       }
+                       if v.AuxInt != 1 {
+                               break
+                       }
+                       x := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64NE
+                       b.SetControl(x)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (EQ (SGTU x (MOVVconst [0])) yes no)
+               // cond:
+               // result: (EQ x yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64SGTU {
+                               break
+                       }
+                       x := v.Args[0]
+                       v_1 := v.Args[1]
+                       if v_1.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       if v_1.AuxInt != 0 {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64EQ
+                       b.SetControl(x)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (EQ (SGTconst [0] x) yes no)
+               // cond:
+               // result: (GEZ x yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64SGTconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       x := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64GEZ
+                       b.SetControl(x)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (EQ (SGT x (MOVVconst [0])) yes no)
+               // cond:
+               // result: (LEZ x yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64SGT {
+                               break
+                       }
+                       x := v.Args[0]
+                       v_1 := v.Args[1]
+                       if v_1.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       if v_1.AuxInt != 0 {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64LEZ
+                       b.SetControl(x)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (EQ  (MOVVconst [0]) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (EQ  (MOVVconst [c]) yes no)
+               // cond: c != 0
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       c := v.AuxInt
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       if !(c != 0) {
+                               break
+                       }
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+       case BlockMIPS64GEZ:
+               // match: (GEZ (MOVVconst [c]) yes no)
+               // cond: c >= 0
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       c := v.AuxInt
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       if !(c >= 0) {
+                               break
+                       }
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (GEZ (MOVVconst [c]) yes no)
+               // cond: c <  0
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       c := v.AuxInt
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       if !(c < 0) {
+                               break
+                       }
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+       case BlockMIPS64GTZ:
+               // match: (GTZ (MOVVconst [c]) yes no)
+               // cond: c >  0
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       c := v.AuxInt
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       if !(c > 0) {
+                               break
+                       }
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (GTZ (MOVVconst [c]) yes no)
+               // cond: c <= 0
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       c := v.AuxInt
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       if !(c <= 0) {
+                               break
+                       }
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
        case BlockIf:
                // match: (If cond yes no)
                // cond:
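
The EQ rules in the hunk above exist because MIPS64 has no condition flags: SGT/SGTU materialize a comparison as a 0/1 value, and a conditional branch then tests that value against zero. When the generated matcher sees a branch testing such a value directly, it folds the comparison into the block kind itself (EQ of "x >u 0" becomes EQ on x, EQ of "0 > x" becomes GEZ on x, and so on), and a branch on a constant collapses into an unconditional First block. A minimal sketch of the source-level effect, using a hypothetical function not taken from the CL:

    // On mips64 the condition below lowers to SGTU i, R0, a 0/1 value
    // that the branch then tests. Depending on which way the frontend
    // orients the branch, either
    //   (EQ (SGTU x (MOVVconst [0])) yes no) -> (EQ x yes no)
    // or its NE mirror fires, deleting the SGTU and branching on i
    // directly, since i >u 0 is just i != 0.
    func last(s []byte, i uint) byte {
        if i > 0 {
            return s[i-1]
        }
        return s[0]
    }
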
@@ -6169,6 +10067,90 @@ func rewriteBlockMIPS64(b *Block) bool {
                        _ = no
                        return true
                }
+       case BlockMIPS64LEZ:
+               // match: (LEZ (MOVVconst [c]) yes no)
+               // cond: c <= 0
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       c := v.AuxInt
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       if !(c <= 0) {
+                               break
+                       }
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (LEZ (MOVVconst [c]) yes no)
+               // cond: c >  0
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       c := v.AuxInt
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       if !(c > 0) {
+                               break
+                       }
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+       case BlockMIPS64LTZ:
+               // match: (LTZ (MOVVconst [c]) yes no)
+               // cond: c <  0
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       c := v.AuxInt
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       if !(c < 0) {
+                               break
+                       }
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (LTZ (MOVVconst [c]) yes no)
+               // cond: c >= 0
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       c := v.AuxInt
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       if !(c >= 0) {
+                               break
+                       }
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
        case BlockMIPS64NE:
                // match: (NE (FPFlagTrue cmp) yes no)
                // cond:
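
The LEZ/LTZ cases above, like the GEZ/GTZ ones earlier, cover the remaining signed zero-comparison block kinds; their only rules resolve a branch whose control is a MOVVconst. When the constant satisfies the condition, the block becomes a First block falling through to its first successor; when it does not, swapSuccessors moves the dead arm into second position first, so the deadcode pass can delete it either way. A minimal sketch of how a constant control actually reaches a branch, assuming a hypothetical pair of functions (a literal constant condition is usually folded long before SSA; inlining is the typical source):

    // After inlining clamp into f, n is the constant 8, the comparison
    // folds to a constant control, and the GTZ/First rules above resolve
    // the branch at compile time, leaving the else arm as dead code.
    func clamp(n int) int {
        if n > 0 {
            return n
        }
        return 0
    }

    func f() int { return clamp(8) }
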
@@ -6204,6 +10186,226 @@ func rewriteBlockMIPS64(b *Block) bool {
                        _ = no
                        return true
                }
+               // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
+               // cond:
+               // result: (EQ cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64XORconst {
+                               break
+                       }
+                       if v.AuxInt != 1 {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       if cmp.Op != OpMIPS64SGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64EQ
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
+               // cond:
+               // result: (EQ cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64XORconst {
+                               break
+                       }
+                       if v.AuxInt != 1 {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       if cmp.Op != OpMIPS64SGTU {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64EQ
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
+               // cond:
+               // result: (EQ cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64XORconst {
+                               break
+                       }
+                       if v.AuxInt != 1 {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       if cmp.Op != OpMIPS64SGTconst {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64EQ
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
+               // cond:
+               // result: (EQ cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64XORconst {
+                               break
+                       }
+                       if v.AuxInt != 1 {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       if cmp.Op != OpMIPS64SGTUconst {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64EQ
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (SGTUconst [1] x) yes no)
+               // cond:
+               // result: (EQ x yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64SGTUconst {
+                               break
+                       }
+                       if v.AuxInt != 1 {
+                               break
+                       }
+                       x := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64EQ
+                       b.SetControl(x)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (SGTU x (MOVVconst [0])) yes no)
+               // cond:
+               // result: (NE x yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64SGTU {
+                               break
+                       }
+                       x := v.Args[0]
+                       v_1 := v.Args[1]
+                       if v_1.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       if v_1.AuxInt != 0 {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64NE
+                       b.SetControl(x)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (SGTconst [0] x) yes no)
+               // cond:
+               // result: (LTZ x yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64SGTconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       x := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64LTZ
+                       b.SetControl(x)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (SGT x (MOVVconst [0])) yes no)
+               // cond:
+               // result: (GTZ x yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64SGT {
+                               break
+                       }
+                       x := v.Args[0]
+                       v_1 := v.Args[1]
+                       if v_1.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       if v_1.AuxInt != 0 {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockMIPS64GTZ
+                       b.SetControl(x)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE  (MOVVconst [0]) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (NE  (MOVVconst [c]) yes no)
+               // cond: c != 0
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpMIPS64MOVVconst {
+                               break
+                       }
+                       c := v.AuxInt
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       if !(c != 0) {
+                               break
+                       }
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
        }
        return false
 }
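
The NE cases mirror the EQ set one-for-one. The XORconst [1] patterns arise because MIPS64 provides only SGT/SGTU: the frontend builds <=, >=, and negated comparisons by flipping operands and inverting the 0/1 result with XOR 1, and NE of an inverted comparison is simply EQ of the original. rewriteBlockMIPS64, like its value counterpart, reports whether it changed anything, and the generic rewrite pass keeps invoking both until neither makes progress. A simplified sketch of that driver, inside package ssa, showing only the shape of the loop rather than quoting cmd/compile/internal/ssa/rewrite.go:

    // Apply the generated block and value rewriters until a fixed point.
    // The real applyRewrite does more bookkeeping (dead-value
    // reclamation, debug output); this is only the skeleton.
    func applyRewrite(f *Func, rb func(*Block) bool, rv func(*Value, *Config) bool) {
        for {
            change := false
            for _, b := range f.Blocks {
                if rb(b) {
                    change = true
                }
                for _, v := range b.Values {
                    if rv(v, f.Config) {
                        change = true
                    }
                }
            }
            if !change {
                return
            }
        }
    }
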
index 054c40eca6f335a483bb63d9079963f477a38947..db47e14e93c2a4d8cd5e6940b347c9f32e484e76 100644 (file)
@@ -1,4 +1,4 @@
-// +build !amd64,!arm,!amd64p32,!386,!arm64,!ppc64le
+// +build !amd64,!arm,!amd64p32,!386,!arm64,!ppc64le,!mips64,!mips64le
 // errorcheck -0 -l -live -wb=0
 
 // Copyright 2014 The Go Authors. All rights reserved.
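
live.go and live_ssa.go (next) are the same liveness test compiled against the legacy backend and the SSA backend respectively, selected with old-style // +build constraints: within a line, space-separated terms are ORed, comma-separated terms are ANDed, and ! negates. Adding !mips64,!mips64le to live.go and mips64 mips64le to live_ssa.go therefore moves the architecture from the legacy list to the SSA list. A minimal illustration of those semantics in a hypothetical file:

    // +build linux,!mips64 darwin

    // The line above reads: (linux AND NOT mips64) OR darwin. A blank
    // line must separate the constraint from the package clause.
    package ignored
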
index 881f139a20fe2687400454174e6093f2cd911c0d..27c4528dc1b022c81c3eeeae4ea4518bacc804ab 100644 (file)
@@ -1,4 +1,4 @@
-// +build amd64 arm amd64p32 386 arm64
+// +build amd64 arm amd64p32 386 arm64 mips64 mips64le
 // errorcheck -0 -l -live -wb=0
 
 // Copyright 2014 The Go Authors. All rights reserved.
index af1186579e65759bb883837005f7311468d77cfe..38c511997ce78f7dff385918902864aacfd4c16d 100644 (file)
@@ -1,5 +1,5 @@
 // errorcheck -0 -d=nil
-// +build amd64 arm amd64p32 386 arm64
+// +build amd64 arm amd64p32 386 arm64 mips64 mips64le
 
 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
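
nilptr3_ssa.go asserts, through errorcheck annotations, which nil checks the SSA backend proves redundant; flipping it on for mips64/mips64le exercises the "remove nil checks" work in this CL. A hedged sketch of the pattern such a test covers, using a hypothetical type rather than lines from the test:

    type pair struct{ a, b int }

    // The load of p.a performs the implicit nil check; the load of p.b
    // is dominated by it, so SSA drops the second check.
    func sum(p *pair) int {
        x := p.a // implicit nil check on p
        y := p.b // check removed: dominated by the one above
        return x + y
    }
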
index 33b4818f186247823c4f9298f71d3dcbbdb095f8..4c38541b03989bdc2154d167d9d52ad07030443b 100644 (file)
@@ -1,4 +1,4 @@
-// +build !amd64,!arm,!amd64p32,!386,!arm64,!ppc64le
+// +build !amd64,!arm,!amd64p32,!386,!arm64,!ppc64le,!mips64,!mips64le
 // errorcheck -0 -d=append,slice
 
 // Copyright 2015 The Go Authors. All rights reserved.
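
sliceopt.go goes the other way: its -d=append,slice diagnostics are printed by the legacy backend's walk-time append/slice optimizations, which the SSA backend handles differently without emitting those messages, so mips64/mips64le join the exclusion list now that SSA is their default. A hedged sketch of the kind of statement the test covers, with the diagnostic paraphrased rather than quoted:

    var buf []int

    // With the legacy backend, -d=append,slice reports that this append
    // can update the slice length in place instead of rebuilding the
    // whole header (paraphrased; see test/sliceopt.go for the exact
    // messages it expects).
    func push(x int) {
        buf = append(buf, x)
    }
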