[dev.ssa] cmd/compile: use shifted and indexed ops in SSA for ARM
author     Cherry Zhang <cherryyz@google.com>
           Fri, 17 Jun 2016 14:34:06 +0000 (10:34 -0400)
committer  Cherry Zhang <cherryyz@google.com>
           Fri, 15 Jul 2016 18:19:59 +0000 (18:19 +0000)
This CL implements the following optimizations for ARM:
- use shifted ops (e.g. ADD R1<<2, R2) and indexed loads/stores
  (sketched below)
- break up shift ops. A shift used to be a single SSA op that generated
  multiple instructions. We now break it into multiple ops, which
  allows constant folding and CSE for comparisons. Conditional moves
  are introduced for this.
- simplify zero/sign-extension ops.

Updates #15365.
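
As a rough editorial sketch of the first two items (not part of the CL; function names and the expected instruction sequences below are illustrative): with the new rules, an expression like a + b<<3 should lower to a single ADD with a shifted second operand, and a variable 32-bit shift becomes a plain SLL plus a conditional move that zeroes the result when the shift amount is 256 or more. ARM register shifts read only the low byte of the amount, and for amounts 32-255 the hardware already yields 0, which is why the comparison is against 256.

package main

import "fmt"

//go:noinline
func addShifted(a, b uint32) uint32 {
        // Expected to lower to one shifted-operand instruction,
        // roughly: ADD Rb<<3, Ra, Rout (instead of SLL then ADD).
        return a + b<<3
}

//go:noinline
func lshVar(x, s uint32) uint32 {
        // Go requires x<<s == 0 for s >= 32. The lowering is now roughly:
        //   SLL     Rs, Rx, Rout
        //   CMP     $256, Rs
        //   MOVW.HS $0, Rout    (the new CMOVWHSconst op)
        return x << s
}

func main() {
        fmt.Println(addShifted(10, 42)) // 346 (10 + 42<<3)
        fmt.Println(lshVar(1, 256))     // 0; the low byte of 256 is 0, so a bare SLL would return 1
}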

Change-Id: I55e262a776a7ef2a1505d75e04d1208913c35d39
Reviewed-on: https://go-review.googlesource.com/24512
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
src/cmd/compile/internal/arm/ssa.go
src/cmd/compile/internal/gc/testdata/arith_ssa.go
src/cmd/compile/internal/ssa/gen/ARM.rules
src/cmd/compile/internal/ssa/gen/ARMOps.go
src/cmd/compile/internal/ssa/op.go
src/cmd/compile/internal/ssa/opGen.go
src/cmd/compile/internal/ssa/rewriteARM.go

src/cmd/compile/internal/arm/ssa.go
index 51722c4f354af634a46740c6930bd8d8f8a45c3f..5dcd8ca1acd9828c05d62fe9dfad6550af582160 100644
@@ -5,6 +5,7 @@
 package arm
 
 import (
+       "fmt"
        "math"
 
        "cmd/compile/internal/gc"
@@ -108,6 +109,57 @@ func storeByType(t ssa.Type) obj.As {
        panic("bad store type")
 }
 
+// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands
+type shift int64
+
+// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
+func (v shift) String() string {
+       op := "<<>>->@>"[((v>>5)&3)<<1:]
+       if v&(1<<4) != 0 {
+               // register shift
+               return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
+       } else {
+               // constant shift
+               return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
+       }
+}
+
+// makeshift encodes a register shifted by a constant
+func makeshift(reg int16, typ int64, s int64) shift {
+       return shift(int64(reg&0xf) | typ | (s&31)<<7)
+}
+
+// genshift generates a Prog for r = r0 op (r1 shifted by s)
+func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
+       p := gc.Prog(as)
+       p.From.Type = obj.TYPE_SHIFT
+       p.From.Offset = int64(makeshift(r1, typ, s))
+       p.Reg = r0
+       if r != 0 {
+               p.To.Type = obj.TYPE_REG
+               p.To.Reg = r
+       }
+       return p
+}
+
+// makeregshift encodes a register shifted by a register
+func makeregshift(r1 int16, typ int64, r2 int16) shift {
+       return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
+}
+
+// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
+func genregshift(as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
+       p := gc.Prog(as)
+       p.From.Type = obj.TYPE_SHIFT
+       p.From.Offset = int64(makeregshift(r1, typ, r2))
+       p.Reg = r0
+       if r != 0 {
+               p.To.Type = obj.TYPE_REG
+               p.To.Reg = r
+       }
+       return p
+}
+
 func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
        s.SetLineno(v.Line)
        switch v.Op {
@@ -237,45 +289,27 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                p.To.Type = obj.TYPE_REG
                p.To.Reg = r
        case ssa.OpARMSLL,
-               ssa.OpARMSRL:
-               // ARM shift instructions uses only the low-order byte of the shift amount
-               // generate conditional instructions to deal with large shifts
-               // CMP  $32, Rarg1
-               // SLL  Rarg1, Rarg0, Rdst
-               // MOVW.HS      $0, Rdst
+               ssa.OpARMSRL,
+               ssa.OpARMSRA:
                r := gc.SSARegNum(v)
                r1 := gc.SSARegNum(v.Args[0])
                r2 := gc.SSARegNum(v.Args[1])
-               p := gc.Prog(arm.ACMP)
-               p.From.Type = obj.TYPE_CONST
-               p.From.Offset = 32
-               p.Reg = r2
-               p = gc.Prog(v.Op.Asm())
+               p := gc.Prog(v.Op.Asm())
                p.From.Type = obj.TYPE_REG
                p.From.Reg = r2
                p.Reg = r1
                p.To.Type = obj.TYPE_REG
                p.To.Reg = r
-               p = gc.Prog(arm.AMOVW)
-               p.Scond = arm.C_SCOND_HS
-               p.From.Type = obj.TYPE_CONST
-               p.From.Offset = 0
-               p.To.Type = obj.TYPE_REG
-               p.To.Reg = r
-       case ssa.OpARMSRA:
+       case ssa.OpARMSRAcond:
                // ARM shift instructions use only the low-order byte of the shift amount
                // generate conditional instructions to deal with large shifts
-               // CMP  $32, Rarg1
+               // flag is already set
                // SRA.HS       $31, Rarg0, Rdst // shift 31 bits to get the sign bit
                // SRA.LO       Rarg1, Rarg0, Rdst
                r := gc.SSARegNum(v)
                r1 := gc.SSARegNum(v.Args[0])
                r2 := gc.SSARegNum(v.Args[1])
-               p := gc.Prog(arm.ACMP)
-               p.From.Type = obj.TYPE_CONST
-               p.From.Offset = 32
-               p.Reg = r2
-               p = gc.Prog(arm.ASRA)
+               p := gc.Prog(arm.ASRA)
                p.Scond = arm.C_SCOND_HS
                p.From.Type = obj.TYPE_CONST
                p.From.Offset = 31
@@ -319,11 +353,115 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                p.To.Type = obj.TYPE_REG
                p.To.Reg = gc.SSARegNum(v)
        case ssa.OpARMSRRconst:
-               p := gc.Prog(arm.AMOVW)
-               p.From.Type = obj.TYPE_SHIFT
-               p.From.Offset = int64(gc.SSARegNum(v.Args[0])&0xf) | arm.SHIFT_RR | (v.AuxInt&31)<<7
-               p.To.Type = obj.TYPE_REG
-               p.To.Reg = gc.SSARegNum(v)
+               genshift(arm.AMOVW, 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_RR, v.AuxInt)
+       case ssa.OpARMADDshiftLL,
+               ssa.OpARMADCshiftLL,
+               ssa.OpARMSUBshiftLL,
+               ssa.OpARMSBCshiftLL,
+               ssa.OpARMRSBshiftLL,
+               ssa.OpARMRSCshiftLL,
+               ssa.OpARMANDshiftLL,
+               ssa.OpARMORshiftLL,
+               ssa.OpARMXORshiftLL,
+               ssa.OpARMBICshiftLL:
+               genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
+       case ssa.OpARMADDSshiftLL,
+               ssa.OpARMSUBSshiftLL,
+               ssa.OpARMRSBSshiftLL:
+               p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
+               p.Scond = arm.C_SBIT
+       case ssa.OpARMADDshiftRL,
+               ssa.OpARMADCshiftRL,
+               ssa.OpARMSUBshiftRL,
+               ssa.OpARMSBCshiftRL,
+               ssa.OpARMRSBshiftRL,
+               ssa.OpARMRSCshiftRL,
+               ssa.OpARMANDshiftRL,
+               ssa.OpARMORshiftRL,
+               ssa.OpARMXORshiftRL,
+               ssa.OpARMBICshiftRL:
+               genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
+       case ssa.OpARMADDSshiftRL,
+               ssa.OpARMSUBSshiftRL,
+               ssa.OpARMRSBSshiftRL:
+               p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
+               p.Scond = arm.C_SBIT
+       case ssa.OpARMADDshiftRA,
+               ssa.OpARMADCshiftRA,
+               ssa.OpARMSUBshiftRA,
+               ssa.OpARMSBCshiftRA,
+               ssa.OpARMRSBshiftRA,
+               ssa.OpARMRSCshiftRA,
+               ssa.OpARMANDshiftRA,
+               ssa.OpARMORshiftRA,
+               ssa.OpARMXORshiftRA,
+               ssa.OpARMBICshiftRA:
+               genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
+       case ssa.OpARMADDSshiftRA,
+               ssa.OpARMSUBSshiftRA,
+               ssa.OpARMRSBSshiftRA:
+               p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
+               p.Scond = arm.C_SBIT
+       case ssa.OpARMMVNshiftLL:
+               genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
+       case ssa.OpARMMVNshiftRL:
+               genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
+       case ssa.OpARMMVNshiftRA:
+               genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
+       case ssa.OpARMMVNshiftLLreg:
+               genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL)
+       case ssa.OpARMMVNshiftRLreg:
+               genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR)
+       case ssa.OpARMMVNshiftRAreg:
+               genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR)
+       case ssa.OpARMADDshiftLLreg,
+               ssa.OpARMADCshiftLLreg,
+               ssa.OpARMSUBshiftLLreg,
+               ssa.OpARMSBCshiftLLreg,
+               ssa.OpARMRSBshiftLLreg,
+               ssa.OpARMRSCshiftLLreg,
+               ssa.OpARMANDshiftLLreg,
+               ssa.OpARMORshiftLLreg,
+               ssa.OpARMXORshiftLLreg,
+               ssa.OpARMBICshiftLLreg:
+               genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LL)
+       case ssa.OpARMADDSshiftLLreg,
+               ssa.OpARMSUBSshiftLLreg,
+               ssa.OpARMRSBSshiftLLreg:
+               p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LL)
+               p.Scond = arm.C_SBIT
+       case ssa.OpARMADDshiftRLreg,
+               ssa.OpARMADCshiftRLreg,
+               ssa.OpARMSUBshiftRLreg,
+               ssa.OpARMSBCshiftRLreg,
+               ssa.OpARMRSBshiftRLreg,
+               ssa.OpARMRSCshiftRLreg,
+               ssa.OpARMANDshiftRLreg,
+               ssa.OpARMORshiftRLreg,
+               ssa.OpARMXORshiftRLreg,
+               ssa.OpARMBICshiftRLreg:
+               genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LR)
+       case ssa.OpARMADDSshiftRLreg,
+               ssa.OpARMSUBSshiftRLreg,
+               ssa.OpARMRSBSshiftRLreg:
+               p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LR)
+               p.Scond = arm.C_SBIT
+       case ssa.OpARMADDshiftRAreg,
+               ssa.OpARMADCshiftRAreg,
+               ssa.OpARMSUBshiftRAreg,
+               ssa.OpARMSBCshiftRAreg,
+               ssa.OpARMRSBshiftRAreg,
+               ssa.OpARMRSCshiftRAreg,
+               ssa.OpARMANDshiftRAreg,
+               ssa.OpARMORshiftRAreg,
+               ssa.OpARMXORshiftRAreg,
+               ssa.OpARMBICshiftRAreg:
+               genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_AR)
+       case ssa.OpARMADDSshiftRAreg,
+               ssa.OpARMSUBSshiftRAreg,
+               ssa.OpARMRSBSshiftRAreg:
+               p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_AR)
+               p.Scond = arm.C_SBIT
        case ssa.OpARMHMUL,
                ssa.OpARMHMULU:
                // 32-bit high multiplication
@@ -385,6 +523,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                p.From.Type = obj.TYPE_CONST
                p.From.Offset = v.AuxInt
                p.Reg = gc.SSARegNum(v.Args[0])
+       case ssa.OpARMCMPshiftLL:
+               genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_LL, v.AuxInt)
+       case ssa.OpARMCMPshiftRL:
+               genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_LR, v.AuxInt)
+       case ssa.OpARMCMPshiftRA:
+               genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_AR, v.AuxInt)
+       case ssa.OpARMCMPshiftLLreg:
+               genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_LL)
+       case ssa.OpARMCMPshiftRLreg:
+               genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_LR)
+       case ssa.OpARMCMPshiftRAreg:
+               genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_AR)
        case ssa.OpARMMOVWaddr:
                p := gc.Prog(arm.AMOVW)
                p.From.Type = obj.TYPE_ADDR
@@ -440,11 +590,72 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                p.To.Type = obj.TYPE_MEM
                p.To.Reg = gc.SSARegNum(v.Args[0])
                gc.AddAux(&p.To, v)
+       case ssa.OpARMMOVWloadidx:
+               // this is just shift 0 bits
+               fallthrough
+       case ssa.OpARMMOVWloadshiftLL:
+               p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
+               p.From.Reg = gc.SSARegNum(v.Args[0])
+       case ssa.OpARMMOVWloadshiftRL:
+               p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
+               p.From.Reg = gc.SSARegNum(v.Args[0])
+       case ssa.OpARMMOVWloadshiftRA:
+               p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
+               p.From.Reg = gc.SSARegNum(v.Args[0])
+       case ssa.OpARMMOVWstoreidx:
+               // this is just shift 0 bits
+               fallthrough
+       case ssa.OpARMMOVWstoreshiftLL:
+               p := gc.Prog(v.Op.Asm())
+               p.From.Type = obj.TYPE_REG
+               p.From.Reg = gc.SSARegNum(v.Args[2])
+               p.To.Type = obj.TYPE_SHIFT
+               p.To.Reg = gc.SSARegNum(v.Args[0])
+               p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_LL, v.AuxInt))
+       case ssa.OpARMMOVWstoreshiftRL:
+               p := gc.Prog(v.Op.Asm())
+               p.From.Type = obj.TYPE_REG
+               p.From.Reg = gc.SSARegNum(v.Args[2])
+               p.To.Type = obj.TYPE_SHIFT
+               p.To.Reg = gc.SSARegNum(v.Args[0])
+               p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_LR, v.AuxInt))
+       case ssa.OpARMMOVWstoreshiftRA:
+               p := gc.Prog(v.Op.Asm())
+               p.From.Type = obj.TYPE_REG
+               p.From.Reg = gc.SSARegNum(v.Args[2])
+               p.To.Type = obj.TYPE_SHIFT
+               p.To.Reg = gc.SSARegNum(v.Args[0])
+               p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_AR, v.AuxInt))
        case ssa.OpARMMOVBreg,
                ssa.OpARMMOVBUreg,
                ssa.OpARMMOVHreg,
-               ssa.OpARMMOVHUreg,
-               ssa.OpARMMVN,
+               ssa.OpARMMOVHUreg:
+               a := v.Args[0]
+               for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg {
+                       a = a.Args[0]
+               }
+               if a.Op == ssa.OpLoadReg {
+                       t := a.Type
+                       switch {
+                       case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
+                               v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
+                               v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
+                               v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
+                               // arg is a proper-typed load, already zero/sign-extended, don't extend again
+                               if gc.SSARegNum(v) == gc.SSARegNum(v.Args[0]) {
+                                       return
+                               }
+                               p := gc.Prog(arm.AMOVW)
+                               p.From.Type = obj.TYPE_REG
+                               p.From.Reg = gc.SSARegNum(v.Args[0])
+                               p.To.Type = obj.TYPE_REG
+                               p.To.Reg = gc.SSARegNum(v)
+                               return
+                       default:
+                       }
+               }
+               fallthrough
+       case ssa.OpARMMVN,
                ssa.OpARMSQRTD,
                ssa.OpARMMOVWF,
                ssa.OpARMMOVWD,
@@ -467,6 +678,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                p.From.Reg = gc.SSARegNum(v.Args[0])
                p.To.Type = obj.TYPE_REG
                p.To.Reg = gc.SSARegNum(v)
+       case ssa.OpARMCMOVWHSconst:
+               p := gc.Prog(arm.AMOVW)
+               p.Scond = arm.C_SCOND_HS
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = v.AuxInt
+               p.To.Type = obj.TYPE_REG
+               p.To.Reg = gc.SSARegNum(v)
+       case ssa.OpARMCMOVWLSconst:
+               p := gc.Prog(arm.AMOVW)
+               p.Scond = arm.C_SCOND_LS
+               p.From.Type = obj.TYPE_CONST
+               p.From.Offset = v.AuxInt
+               p.To.Type = obj.TYPE_REG
+               p.To.Reg = gc.SSARegNum(v)
        case ssa.OpARMCALLstatic:
                if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
                        // Deferred calls will appear to be returning to
@@ -657,24 +882,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                p4 := gc.Prog(arm.ABLT)
                p4.To.Type = obj.TYPE_BRANCH
                gc.Patch(p4, p)
-       case ssa.OpARMLoweredZeromask:
-               // int32(arg0>>1 - arg0) >> 31
-               // RSB  r0>>1, r0, r
-               // SRA  $31, r, r
-               r0 := gc.SSARegNum(v.Args[0])
-               r := gc.SSARegNum(v)
-               p := gc.Prog(arm.ARSB)
-               p.From.Type = obj.TYPE_SHIFT
-               p.From.Offset = int64(r0&0xf) | arm.SHIFT_LR | 1<<7 // unsigned r0>>1
-               p.Reg = r0
-               p.To.Type = obj.TYPE_REG
-               p.To.Reg = r
-               p = gc.Prog(arm.ASRA)
-               p.From.Type = obj.TYPE_CONST
-               p.From.Offset = 31
-               p.Reg = r
-               p.To.Type = obj.TYPE_REG
-               p.To.Reg = r
        case ssa.OpVarDef:
                gc.Gvardef(v.Aux.(*gc.Node))
        case ssa.OpVarKill:
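
Editorial aside on the shift helpers added above (a sketch, not part of the CL): an obj.TYPE_SHIFT operand carries the whole shifted-register description in its Offset field. The standalone program below reproduces that bit layout as read off the shift.String method; the shift-kind constants are assumed to mirror arm.SHIFT_LL/LR/AR/RR rather than imported from the toolchain.

package main

import "fmt"

// Assumed bit layout (from shift.String above):
//   bits 0-3   source register Rm
//   bit  4     1 = shift amount in a register, 0 = constant shift
//   bits 5-6   shift kind: 00 "<<", 01 ">>", 10 "->", 11 "@>"
//   bits 7-11  constant shift amount, or bits 8-11 the shift register Rs
const (
        shiftLL = 0 << 5 // assumed value of arm.SHIFT_LL
        shiftLR = 1 << 5 // arm.SHIFT_LR
        shiftAR = 2 << 5 // arm.SHIFT_AR
        shiftRR = 3 << 5 // arm.SHIFT_RR
)

// constShift packs "Rm <shift> #s", like makeshift above.
func constShift(rm, typ, s int64) int64 {
        return rm&0xf | typ | (s&31)<<7
}

// regShift packs "Rm <shift> Rs", like makeregshift above.
func regShift(rm, typ, rs int64) int64 {
        return rm&0xf | typ | (rs&0xf)<<8 | 1<<4
}

func main() {
        fmt.Printf("R1<<3  -> %#x\n", constShift(1, shiftLL, 3)) // 0x181
        fmt.Printf("R1>>R2 -> %#x\n", regShift(1, shiftLR, 2))   // 0x231
}
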
src/cmd/compile/internal/gc/testdata/arith_ssa.go
index 7c82bbd6ce59650517bead85745c071b4fad9f96..d850ce27b22e37138f9d45cc3be1448fb221b88e 100644
@@ -553,6 +553,445 @@ func testOrPhi() {
        }
 }
 
+//go:noinline
+func addshiftLL_ssa(a, b uint32) uint32 {
+       return a + b<<3
+}
+
+//go:noinline
+func subshiftLL_ssa(a, b uint32) uint32 {
+       return a - b<<3
+}
+
+//go:noinline
+func rsbshiftLL_ssa(a, b uint32) uint32 {
+       return a<<3 - b
+}
+
+//go:noinline
+func andshiftLL_ssa(a, b uint32) uint32 {
+       return a & (b << 3)
+}
+
+//go:noinline
+func orshiftLL_ssa(a, b uint32) uint32 {
+       return a | b<<3
+}
+
+//go:noinline
+func xorshiftLL_ssa(a, b uint32) uint32 {
+       return a ^ b<<3
+}
+
+//go:noinline
+func bicshiftLL_ssa(a, b uint32) uint32 {
+       return a &^ (b << 3)
+}
+
+//go:noinline
+func notshiftLL_ssa(a uint32) uint32 {
+       return ^(a << 3)
+}
+
+//go:noinline
+func addshiftRL_ssa(a, b uint32) uint32 {
+       return a + b>>3
+}
+
+//go:noinline
+func subshiftRL_ssa(a, b uint32) uint32 {
+       return a - b>>3
+}
+
+//go:noinline
+func rsbshiftRL_ssa(a, b uint32) uint32 {
+       return a>>3 - b
+}
+
+//go:noinline
+func andshiftRL_ssa(a, b uint32) uint32 {
+       return a & (b >> 3)
+}
+
+//go:noinline
+func orshiftRL_ssa(a, b uint32) uint32 {
+       return a | b>>3
+}
+
+//go:noinline
+func xorshiftRL_ssa(a, b uint32) uint32 {
+       return a ^ b>>3
+}
+
+//go:noinline
+func bicshiftRL_ssa(a, b uint32) uint32 {
+       return a &^ (b >> 3)
+}
+
+//go:noinline
+func notshiftRL_ssa(a uint32) uint32 {
+       return ^(a >> 3)
+}
+
+//go:noinline
+func addshiftRA_ssa(a, b int32) int32 {
+       return a + b>>3
+}
+
+//go:noinline
+func subshiftRA_ssa(a, b int32) int32 {
+       return a - b>>3
+}
+
+//go:noinline
+func rsbshiftRA_ssa(a, b int32) int32 {
+       return a>>3 - b
+}
+
+//go:noinline
+func andshiftRA_ssa(a, b int32) int32 {
+       return a & (b >> 3)
+}
+
+//go:noinline
+func orshiftRA_ssa(a, b int32) int32 {
+       return a | b>>3
+}
+
+//go:noinline
+func xorshiftRA_ssa(a, b int32) int32 {
+       return a ^ b>>3
+}
+
+//go:noinline
+func bicshiftRA_ssa(a, b int32) int32 {
+       return a &^ (b >> 3)
+}
+
+//go:noinline
+func notshiftRA_ssa(a int32) int32 {
+       return ^(a >> 3)
+}
+
+//go:noinline
+func addshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a + b<<s
+}
+
+//go:noinline
+func subshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a - b<<s
+}
+
+//go:noinline
+func rsbshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a<<s - b
+}
+
+//go:noinline
+func andshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a & (b << s)
+}
+
+//go:noinline
+func orshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a | b<<s
+}
+
+//go:noinline
+func xorshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a ^ b<<s
+}
+
+//go:noinline
+func bicshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a &^ (b << s)
+}
+
+//go:noinline
+func notshiftLLreg_ssa(a uint32, s uint8) uint32 {
+       return ^(a << s)
+}
+
+//go:noinline
+func addshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a + b>>s
+}
+
+//go:noinline
+func subshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a - b>>s
+}
+
+//go:noinline
+func rsbshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a>>s - b
+}
+
+//go:noinline
+func andshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a & (b >> s)
+}
+
+//go:noinline
+func orshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a | b>>s
+}
+
+//go:noinline
+func xorshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a ^ b>>s
+}
+
+//go:noinline
+func bicshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+       return a &^ (b >> s)
+}
+
+//go:noinline
+func notshiftRLreg_ssa(a uint32, s uint8) uint32 {
+       return ^(a >> s)
+}
+
+//go:noinline
+func addshiftRAreg_ssa(a, b int32, s uint8) int32 {
+       return a + b>>s
+}
+
+//go:noinline
+func subshiftRAreg_ssa(a, b int32, s uint8) int32 {
+       return a - b>>s
+}
+
+//go:noinline
+func rsbshiftRAreg_ssa(a, b int32, s uint8) int32 {
+       return a>>s - b
+}
+
+//go:noinline
+func andshiftRAreg_ssa(a, b int32, s uint8) int32 {
+       return a & (b >> s)
+}
+
+//go:noinline
+func orshiftRAreg_ssa(a, b int32, s uint8) int32 {
+       return a | b>>s
+}
+
+//go:noinline
+func xorshiftRAreg_ssa(a, b int32, s uint8) int32 {
+       return a ^ b>>s
+}
+
+//go:noinline
+func bicshiftRAreg_ssa(a, b int32, s uint8) int32 {
+       return a &^ (b >> s)
+}
+
+//go:noinline
+func notshiftRAreg_ssa(a int32, s uint8) int32 {
+       return ^(a >> s)
+}
+
+// test ARM shifted ops
+func testShiftedOps() {
+       a, b := uint32(10), uint32(42)
+       if want, got := a+b<<3, addshiftLL_ssa(a, b); got != want {
+               println("addshiftLL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a-b<<3, subshiftLL_ssa(a, b); got != want {
+               println("subshiftLL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a<<3-b, rsbshiftLL_ssa(a, b); got != want {
+               println("rsbshiftLL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a&(b<<3), andshiftLL_ssa(a, b); got != want {
+               println("andshiftLL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a|b<<3, orshiftLL_ssa(a, b); got != want {
+               println("orshiftLL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a^b<<3, xorshiftLL_ssa(a, b); got != want {
+               println("xorshiftLL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a&^(b<<3), bicshiftLL_ssa(a, b); got != want {
+               println("bicshiftLL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := ^(a << 3), notshiftLL_ssa(a); got != want {
+               println("notshiftLL_ssa(10) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a+b>>3, addshiftRL_ssa(a, b); got != want {
+               println("addshiftRL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a-b>>3, subshiftRL_ssa(a, b); got != want {
+               println("subshiftRL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a>>3-b, rsbshiftRL_ssa(a, b); got != want {
+               println("rsbshiftRL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a&(b>>3), andshiftRL_ssa(a, b); got != want {
+               println("andshiftRL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a|b>>3, orshiftRL_ssa(a, b); got != want {
+               println("orshiftRL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a^b>>3, xorshiftRL_ssa(a, b); got != want {
+               println("xorshiftRL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a&^(b>>3), bicshiftRL_ssa(a, b); got != want {
+               println("bicshiftRL_ssa(10, 42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := ^(a >> 3), notshiftRL_ssa(a); got != want {
+               println("notshiftRL_ssa(10) =", got, " want ", want)
+               failed = true
+       }
+       c, d := int32(10), int32(-42)
+       if want, got := c+d>>3, addshiftRA_ssa(c, d); got != want {
+               println("addshiftRA_ssa(10, -42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c-d>>3, subshiftRA_ssa(c, d); got != want {
+               println("subshiftRA_ssa(10, -42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c>>3-d, rsbshiftRA_ssa(c, d); got != want {
+               println("rsbshiftRA_ssa(10, -42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c&(d>>3), andshiftRA_ssa(c, d); got != want {
+               println("andshiftRA_ssa(10, -42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c|d>>3, orshiftRA_ssa(c, d); got != want {
+               println("orshiftRA_ssa(10, -42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c^d>>3, xorshiftRA_ssa(c, d); got != want {
+               println("xorshiftRA_ssa(10, -42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c&^(d>>3), bicshiftRA_ssa(c, d); got != want {
+               println("bicshiftRA_ssa(10, -42) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := ^(d >> 3), notshiftRA_ssa(d); got != want {
+               println("notshiftRA_ssa(-42) =", got, " want ", want)
+               failed = true
+       }
+       s := uint8(3)
+       if want, got := a+b<<s, addshiftLLreg_ssa(a, b, s); got != want {
+               println("addshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a-b<<s, subshiftLLreg_ssa(a, b, s); got != want {
+               println("subshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a<<s-b, rsbshiftLLreg_ssa(a, b, s); got != want {
+               println("rsbshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a&(b<<s), andshiftLLreg_ssa(a, b, s); got != want {
+               println("andshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a|b<<s, orshiftLLreg_ssa(a, b, s); got != want {
+               println("orshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a^b<<s, xorshiftLLreg_ssa(a, b, s); got != want {
+               println("xorshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a&^(b<<s), bicshiftLLreg_ssa(a, b, s); got != want {
+               println("bicshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := ^(a << s), notshiftLLreg_ssa(a, s); got != want {
+               println("notshiftLLreg_ssa(10) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a+b>>s, addshiftRLreg_ssa(a, b, s); got != want {
+               println("addshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a-b>>s, subshiftRLreg_ssa(a, b, s); got != want {
+               println("subshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a>>s-b, rsbshiftRLreg_ssa(a, b, s); got != want {
+               println("rsbshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a&(b>>s), andshiftRLreg_ssa(a, b, s); got != want {
+               println("andshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a|b>>s, orshiftRLreg_ssa(a, b, s); got != want {
+               println("orshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a^b>>s, xorshiftRLreg_ssa(a, b, s); got != want {
+               println("xorshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := a&^(b>>s), bicshiftRLreg_ssa(a, b, s); got != want {
+               println("bicshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := ^(a >> s), notshiftRLreg_ssa(a, s); got != want {
+               println("notshiftRLreg_ssa(10) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c+d>>s, addshiftRAreg_ssa(c, d, s); got != want {
+               println("addshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c-d>>s, subshiftRAreg_ssa(c, d, s); got != want {
+               println("subshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c>>s-d, rsbshiftRAreg_ssa(c, d, s); got != want {
+               println("rsbshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c&(d>>s), andshiftRAreg_ssa(c, d, s); got != want {
+               println("andshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c|d>>s, orshiftRAreg_ssa(c, d, s); got != want {
+               println("orshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c^d>>s, xorshiftRAreg_ssa(c, d, s); got != want {
+               println("xorshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := c&^(d>>s), bicshiftRAreg_ssa(c, d, s); got != want {
+               println("bicshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+               failed = true
+       }
+       if want, got := ^(d >> s), notshiftRAreg_ssa(d, s); got != want {
+               println("notshiftRAreg_ssa(-42, 3) =", got, " want ", want)
+               failed = true
+       }
+}
+
 var failed = false
 
 func main() {
@@ -573,6 +1012,7 @@ func main() {
        testLoadCombine()
        testLoadSymCombine()
        testShiftRemoval()
+       testShiftedOps()
 
        if failed {
                panic("failed")
src/cmd/compile/internal/ssa/gen/ARM.rules
index ee68ad540f643c555cf65b9e816c484e51ce526e..7ec0e502ec37a5289ee530c6cf0a5665ce668b4f 100644
 (Not x) -> (XORconst [1] x)
 
 // shifts
-(Lsh32x32 x y) -> (SLL x y)
-(Lsh32x16 x y) -> (SLL x (ZeroExt16to32 y))
+// hardware instruction uses only the low byte of the shift
+// we compare to 256 to ensure Go semantics for large shifts
+(Lsh32x32 x y) -> (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh32x16 x y) -> (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
 (Lsh32x8  x y) -> (SLL x (ZeroExt8to32 y))
 
-(Lsh16x32 x y) -> (SLL x y)
-(Lsh16x16 x y) -> (SLL x (ZeroExt16to32 y))
+(Lsh16x32 x y) -> (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh16x16 x y) -> (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
 (Lsh16x8  x y) -> (SLL x (ZeroExt8to32 y))
 
-(Lsh8x32 x y) -> (SLL x y)
-(Lsh8x16 x y) -> (SLL x (ZeroExt16to32 y))
+(Lsh8x32 x y) -> (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh8x16 x y) -> (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
 (Lsh8x8  x y) -> (SLL x (ZeroExt8to32 y))
 
-(Rsh32Ux32 x y) -> (SRL x y)
-(Rsh32Ux16 x y) -> (SRL x (ZeroExt16to32 y))
+(Rsh32Ux32 x y) -> (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+(Rsh32Ux16 x y) -> (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
 (Rsh32Ux8  x y) -> (SRL x (ZeroExt8to32 y))
 
-(Rsh16Ux32 x y) -> (SRL (ZeroExt16to32 x) y)
-(Rsh16Ux16 x y) -> (SRL (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Rsh16Ux32 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+(Rsh16Ux16 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
 (Rsh16Ux8  x y) -> (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
 
-(Rsh8Ux32 x y) -> (SRL (ZeroExt8to32 x) y)
-(Rsh8Ux16 x y) -> (SRL (ZeroExt8to32 x) (ZeroExt16to32 y))
+(Rsh8Ux32 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+(Rsh8Ux16 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
 (Rsh8Ux8  x y) -> (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
 
-(Rsh32x32 x y) -> (SRA x y)
-(Rsh32x16 x y) -> (SRA x (ZeroExt16to32 y))
+(Rsh32x32 x y) -> (SRAcond x y (CMPconst [256] y))
+(Rsh32x16 x y) -> (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
 (Rsh32x8  x y) -> (SRA x (ZeroExt8to32 y))
 
-(Rsh16x32 x y) -> (SRA (SignExt16to32 x) y)
-(Rsh16x16 x y) -> (SRA (SignExt16to32 x) (ZeroExt16to32 y))
+(Rsh16x32 x y) -> (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+(Rsh16x16 x y) -> (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
 (Rsh16x8  x y) -> (SRA (SignExt16to32 x) (ZeroExt8to32 y))
 
-(Rsh8x32 x y) -> (SRA (SignExt8to32 x) y)
-(Rsh8x16 x y) -> (SRA (SignExt8to32 x) (ZeroExt16to32 y))
+(Rsh8x32 x y) -> (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+(Rsh8x16 x y) -> (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
 (Rsh8x8  x y) -> (SRA (SignExt8to32 x) (ZeroExt8to32 y))
 
 // constant shifts
 (SignExt16to32 x) -> (MOVHreg x)
 
 (Signmask x) -> (SRAconst x [31])
-(Zeromask x) -> (LoweredZeromask x)
+(Zeromask x) -> (SRAconst (RSBshiftRL <config.fe.TypeInt32()> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
 
 // float <-> int conversion
 (Cvt32to32F x) -> (MOVWF x)
 (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
 (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
 
+(MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) -> x
+(MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x
+(MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x
+(MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x
+
 // fold constant into arithmetic ops
 (ADD (MOVWconst [c]) x) -> (ADDconst [c] x)
 (ADD x (MOVWconst [c])) -> (ADDconst [c] x)
 (CMP x (MOVWconst [c])) -> (CMPconst [c] x)
 (CMP (MOVWconst [c]) x) -> (InvertFlags (CMPconst [c] x))
 
-(LoweredZeromask (MOVWconst [0])) -> (MOVWconst [0])
-(LoweredZeromask (MOVWconst [c])) && c != 0 -> (MOVWconst [0xffffffff])
-
 // don't extend after proper load
 // MOVWreg instruction is not emitted if src and dst registers are same, but it ensures the type.
 (MOVBreg x:(MOVBload _ _)) -> (MOVWreg x)
 (MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c&0x7f] x)
 (MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c&0x7fff] x)
 
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) -> (MOVWreg x)
+(MOVBUreg x:(MOVBUreg _)) -> (MOVWreg x)
+(MOVHreg x:(MOVBreg _)) -> (MOVWreg x)
+(MOVHreg x:(MOVBUreg _)) -> (MOVWreg x)
+(MOVHreg x:(MOVHreg _)) -> (MOVWreg x)
+(MOVHUreg x:(MOVBUreg _)) -> (MOVWreg x)
+(MOVHUreg x:(MOVHUreg _)) -> (MOVWreg x)
+
 // don't extend before store
 (MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
 (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
 (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
 
 // mul by constant
-(MUL x (MOVWconst [-1])) -> (RSBconst [0] x)
+(MUL x (MOVWconst [c])) && int32(c) == -1 -> (RSBconst [0] x)
 (MUL _ (MOVWconst [0])) -> (MOVWconst [0])
 (MUL x (MOVWconst [1])) -> x
 (MUL x (MOVWconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
-
-(MUL (MOVWconst [-1]) x) -> (RSBconst [0] x)
+(MUL x (MOVWconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)])
+(MUL x (MOVWconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (RSBshiftLL x x [log2(c+1)])
+(MUL x (MOVWconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL x (MOVWconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL x (MOVWconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+(MUL x (MOVWconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+
+(MUL (MOVWconst [c]) x) && int32(c) == -1 -> (RSBconst [0] x)
 (MUL (MOVWconst [0]) _) -> (MOVWconst [0])
 (MUL (MOVWconst [1]) x) -> x
 (MUL (MOVWconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
-
-(MULA x (MOVWconst [-1]) a) -> (SUB a x)
+(MUL (MOVWconst [c]) x) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)])
+(MUL (MOVWconst [c]) x) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (RSBshiftLL x x [log2(c+1)])
+(MUL (MOVWconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL (MOVWconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL (MOVWconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+(MUL (MOVWconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+
+(MULA x (MOVWconst [c]) a) && int32(c) == -1 -> (SUB a x)
 (MULA _ (MOVWconst [0]) a) -> a
 (MULA x (MOVWconst [1]) a) -> (ADD x a)
 (MULA x (MOVWconst [c]) a) && isPowerOfTwo(c) -> (ADD (SLLconst <x.Type> [log2(c)] x) a)
-
-(MULA (MOVWconst [-1]) x a) -> (SUB a x)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+(MULA x (MOVWconst [c]) a) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+(MULA x (MOVWconst [c]) a) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+(MULA x (MOVWconst [c]) a) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+(MULA x (MOVWconst [c]) a) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+
+(MULA (MOVWconst [c]) x a) && int32(c) == -1 -> (SUB a x)
 (MULA (MOVWconst [0]) _ a) -> a
 (MULA (MOVWconst [1]) x a) -> (ADD x a)
 (MULA (MOVWconst [c]) x a) && isPowerOfTwo(c) -> (ADD (SLLconst <x.Type> [log2(c)] x) a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+(MULA (MOVWconst [c]) x a) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+(MULA (MOVWconst [c]) x a) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+(MULA (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+(MULA (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
 
 // div by constant
 (DIVU x (MOVWconst [1])) -> x
 (GreaterEqual (InvertFlags x)) -> (LessEqual x)
 (GreaterEqualU (InvertFlags x)) -> (LessEqualU x)
 
+// absorb flag constants into conditional instructions
+(CMOVWLSconst _ (FlagEQ) [c]) -> (MOVWconst [c])
+(CMOVWLSconst _ (FlagLT_ULT) [c]) -> (MOVWconst [c])
+(CMOVWLSconst x (FlagLT_UGT)) -> x
+(CMOVWLSconst _ (FlagGT_ULT) [c]) -> (MOVWconst [c])
+(CMOVWLSconst x (FlagGT_UGT)) -> x
+
+(CMOVWHSconst _ (FlagEQ) [c]) -> (MOVWconst [c])
+(CMOVWHSconst x (FlagLT_ULT)) -> x
+(CMOVWHSconst _ (FlagLT_UGT) [c]) -> (MOVWconst [c])
+(CMOVWHSconst x (FlagGT_ULT)) -> x
+(CMOVWHSconst _ (FlagGT_UGT) [c]) -> (MOVWconst [c])
+
+(CMOVWLSconst x (InvertFlags flags) [c]) -> (CMOVWHSconst x flags [c])
+(CMOVWHSconst x (InvertFlags flags) [c]) -> (CMOVWLSconst x flags [c])
+
+(SRAcond x _ (FlagEQ)) -> (SRAconst x [31])
+(SRAcond x y (FlagLT_ULT)) -> (SRA x y)
+(SRAcond x _ (FlagLT_UGT)) -> (SRAconst x [31])
+(SRAcond x y (FlagGT_ULT)) -> (SRA x y)
+(SRAcond x _ (FlagGT_UGT)) -> (SRAconst x [31])
+
 // remove redundant *const ops
 (ADDconst [0] x) -> x
 (SUBconst [0] x) -> x
 (XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
 (BICconst [c] (MOVWconst [d])) -> (MOVWconst [d&^c])
 (MVN (MOVWconst [c])) -> (MOVWconst [^c])
+(MOVBreg (MOVWconst [c])) -> (MOVWconst [int64(int8(c))])
+(MOVBUreg (MOVWconst [c])) -> (MOVWconst [int64(uint8(c))])
+(MOVHreg (MOVWconst [c])) -> (MOVWconst [int64(int16(c))])
+(MOVHUreg (MOVWconst [c])) -> (MOVWconst [int64(uint16(c))])
+(MOVWreg (MOVWconst [c])) -> (MOVWconst [c])
+
+// absorb shifts into ops
+(ADD x (SLLconst [c] y)) -> (ADDshiftLL x y [c])
+(ADD (SLLconst [c] y) x) -> (ADDshiftLL x y [c])
+(ADD x (SRLconst [c] y)) -> (ADDshiftRL x y [c])
+(ADD (SRLconst [c] y) x) -> (ADDshiftRL x y [c])
+(ADD x (SRAconst [c] y)) -> (ADDshiftRA x y [c])
+(ADD (SRAconst [c] y) x) -> (ADDshiftRA x y [c])
+(ADD x (SLL y z)) -> (ADDshiftLLreg x y z)
+(ADD (SLL y z) x) -> (ADDshiftLLreg x y z)
+(ADD x (SRL y z)) -> (ADDshiftRLreg x y z)
+(ADD (SRL y z) x) -> (ADDshiftRLreg x y z)
+(ADD x (SRA y z)) -> (ADDshiftRAreg x y z)
+(ADD (SRA y z) x) -> (ADDshiftRAreg x y z)
+(ADC x (SLLconst [c] y) flags) -> (ADCshiftLL x y [c] flags)
+(ADC (SLLconst [c] y) x flags) -> (ADCshiftLL x y [c] flags)
+(ADC x (SRLconst [c] y) flags) -> (ADCshiftRL x y [c] flags)
+(ADC (SRLconst [c] y) x flags) -> (ADCshiftRL x y [c] flags)
+(ADC x (SRAconst [c] y) flags) -> (ADCshiftRA x y [c] flags)
+(ADC (SRAconst [c] y) x flags) -> (ADCshiftRA x y [c] flags)
+(ADC x (SLL y z) flags) -> (ADCshiftLLreg x y z flags)
+(ADC (SLL y z) x flags) -> (ADCshiftLLreg x y z flags)
+(ADC x (SRL y z) flags) -> (ADCshiftRLreg x y z flags)
+(ADC (SRL y z) x flags) -> (ADCshiftRLreg x y z flags)
+(ADC x (SRA y z) flags) -> (ADCshiftRAreg x y z flags)
+(ADC (SRA y z) x flags) -> (ADCshiftRAreg x y z flags)
+(ADDS x (SLLconst [c] y)) -> (ADDSshiftLL x y [c])
+(ADDS (SLLconst [c] y) x) -> (ADDSshiftLL x y [c])
+(ADDS x (SRLconst [c] y)) -> (ADDSshiftRL x y [c])
+(ADDS (SRLconst [c] y) x) -> (ADDSshiftRL x y [c])
+(ADDS x (SRAconst [c] y)) -> (ADDSshiftRA x y [c])
+(ADDS (SRAconst [c] y) x) -> (ADDSshiftRA x y [c])
+(ADDS x (SLL y z)) -> (ADDSshiftLLreg x y z)
+(ADDS (SLL y z) x) -> (ADDSshiftLLreg x y z)
+(ADDS x (SRL y z)) -> (ADDSshiftRLreg x y z)
+(ADDS (SRL y z) x) -> (ADDSshiftRLreg x y z)
+(ADDS x (SRA y z)) -> (ADDSshiftRAreg x y z)
+(ADDS (SRA y z) x) -> (ADDSshiftRAreg x y z)
+(SUB x (SLLconst [c] y)) -> (SUBshiftLL x y [c])
+(SUB (SLLconst [c] y) x) -> (RSBshiftLL x y [c])
+(SUB x (SRLconst [c] y)) -> (SUBshiftRL x y [c])
+(SUB (SRLconst [c] y) x) -> (RSBshiftRL x y [c])
+(SUB x (SRAconst [c] y)) -> (SUBshiftRA x y [c])
+(SUB (SRAconst [c] y) x) -> (RSBshiftRA x y [c])
+(SUB x (SLL y z)) -> (SUBshiftLLreg x y z)
+(SUB (SLL y z) x) -> (RSBshiftLLreg x y z)
+(SUB x (SRL y z)) -> (SUBshiftRLreg x y z)
+(SUB (SRL y z) x) -> (RSBshiftRLreg x y z)
+(SUB x (SRA y z)) -> (SUBshiftRAreg x y z)
+(SUB (SRA y z) x) -> (RSBshiftRAreg x y z)
+(SBC x (SLLconst [c] y) flags) -> (SBCshiftLL x y [c] flags)
+(SBC (SLLconst [c] y) x flags) -> (RSCshiftLL x y [c] flags)
+(SBC x (SRLconst [c] y) flags) -> (SBCshiftRL x y [c] flags)
+(SBC (SRLconst [c] y) x flags) -> (RSCshiftRL x y [c] flags)
+(SBC x (SRAconst [c] y) flags) -> (SBCshiftRA x y [c] flags)
+(SBC (SRAconst [c] y) x flags) -> (RSCshiftRA x y [c] flags)
+(SBC x (SLL y z) flags) -> (SBCshiftLLreg x y z flags)
+(SBC (SLL y z) x flags) -> (RSCshiftLLreg x y z flags)
+(SBC x (SRL y z) flags) -> (SBCshiftRLreg x y z flags)
+(SBC (SRL y z) x flags) -> (RSCshiftRLreg x y z flags)
+(SBC x (SRA y z) flags) -> (SBCshiftRAreg x y z flags)
+(SBC (SRA y z) x flags) -> (RSCshiftRAreg x y z flags)
+(SUBS x (SLLconst [c] y)) -> (SUBSshiftLL x y [c])
+(SUBS (SLLconst [c] y) x) -> (RSBSshiftLL x y [c])
+(SUBS x (SRLconst [c] y)) -> (SUBSshiftRL x y [c])
+(SUBS (SRLconst [c] y) x) -> (RSBSshiftRL x y [c])
+(SUBS x (SRAconst [c] y)) -> (SUBSshiftRA x y [c])
+(SUBS (SRAconst [c] y) x) -> (RSBSshiftRA x y [c])
+(SUBS x (SLL y z)) -> (SUBSshiftLLreg x y z)
+(SUBS (SLL y z) x) -> (RSBSshiftLLreg x y z)
+(SUBS x (SRL y z)) -> (SUBSshiftRLreg x y z)
+(SUBS (SRL y z) x) -> (RSBSshiftRLreg x y z)
+(SUBS x (SRA y z)) -> (SUBSshiftRAreg x y z)
+(SUBS (SRA y z) x) -> (RSBSshiftRAreg x y z)
+(RSB x (SLLconst [c] y)) -> (RSBshiftLL x y [c])
+(RSB (SLLconst [c] y) x) -> (SUBshiftLL x y [c])
+(RSB x (SRLconst [c] y)) -> (RSBshiftRL x y [c])
+(RSB (SRLconst [c] y) x) -> (SUBshiftRL x y [c])
+(RSB x (SRAconst [c] y)) -> (RSBshiftRA x y [c])
+(RSB (SRAconst [c] y) x) -> (SUBshiftRA x y [c])
+(RSB x (SLL y z)) -> (RSBshiftLLreg x y z)
+(RSB (SLL y z) x) -> (SUBshiftLLreg x y z)
+(RSB x (SRL y z)) -> (RSBshiftRLreg x y z)
+(RSB (SRL y z) x) -> (SUBshiftRLreg x y z)
+(RSB x (SRA y z)) -> (RSBshiftRAreg x y z)
+(RSB (SRA y z) x) -> (SUBshiftRAreg x y z)
+(AND x (SLLconst [c] y)) -> (ANDshiftLL x y [c])
+(AND (SLLconst [c] y) x) -> (ANDshiftLL x y [c])
+(AND x (SRLconst [c] y)) -> (ANDshiftRL x y [c])
+(AND (SRLconst [c] y) x) -> (ANDshiftRL x y [c])
+(AND x (SRAconst [c] y)) -> (ANDshiftRA x y [c])
+(AND (SRAconst [c] y) x) -> (ANDshiftRA x y [c])
+(AND x (SLL y z)) -> (ANDshiftLLreg x y z)
+(AND (SLL y z) x) -> (ANDshiftLLreg x y z)
+(AND x (SRL y z)) -> (ANDshiftRLreg x y z)
+(AND (SRL y z) x) -> (ANDshiftRLreg x y z)
+(AND x (SRA y z)) -> (ANDshiftRAreg x y z)
+(AND (SRA y z) x) -> (ANDshiftRAreg x y z)
+(OR x (SLLconst [c] y)) -> (ORshiftLL x y [c])
+(OR (SLLconst [c] y) x) -> (ORshiftLL x y [c])
+(OR x (SRLconst [c] y)) -> (ORshiftRL x y [c])
+(OR (SRLconst [c] y) x) -> (ORshiftRL x y [c])
+(OR x (SRAconst [c] y)) -> (ORshiftRA x y [c])
+(OR (SRAconst [c] y) x) -> (ORshiftRA x y [c])
+(OR x (SLL y z)) -> (ORshiftLLreg x y z)
+(OR (SLL y z) x) -> (ORshiftLLreg x y z)
+(OR x (SRL y z)) -> (ORshiftRLreg x y z)
+(OR (SRL y z) x) -> (ORshiftRLreg x y z)
+(OR x (SRA y z)) -> (ORshiftRAreg x y z)
+(OR (SRA y z) x) -> (ORshiftRAreg x y z)
+(XOR x (SLLconst [c] y)) -> (XORshiftLL x y [c])
+(XOR (SLLconst [c] y) x) -> (XORshiftLL x y [c])
+(XOR x (SRLconst [c] y)) -> (XORshiftRL x y [c])
+(XOR (SRLconst [c] y) x) -> (XORshiftRL x y [c])
+(XOR x (SRAconst [c] y)) -> (XORshiftRA x y [c])
+(XOR (SRAconst [c] y) x) -> (XORshiftRA x y [c])
+(XOR x (SLL y z)) -> (XORshiftLLreg x y z)
+(XOR (SLL y z) x) -> (XORshiftLLreg x y z)
+(XOR x (SRL y z)) -> (XORshiftRLreg x y z)
+(XOR (SRL y z) x) -> (XORshiftRLreg x y z)
+(XOR x (SRA y z)) -> (XORshiftRAreg x y z)
+(XOR (SRA y z) x) -> (XORshiftRAreg x y z)
+(BIC x (SLLconst [c] y)) -> (BICshiftLL x y [c])
+(BIC x (SRLconst [c] y)) -> (BICshiftRL x y [c])
+(BIC x (SRAconst [c] y)) -> (BICshiftRA x y [c])
+(BIC x (SLL y z)) -> (BICshiftLLreg x y z)
+(BIC x (SRL y z)) -> (BICshiftRLreg x y z)
+(BIC x (SRA y z)) -> (BICshiftRAreg x y z)
+(MVN (SLLconst [c] x)) -> (MVNshiftLL x [c])
+(MVN (SRLconst [c] x)) -> (MVNshiftRL x [c])
+(MVN (SRAconst [c] x)) -> (MVNshiftRA x [c])
+(MVN (SLL x y)) -> (MVNshiftLLreg x y)
+(MVN (SRL x y)) -> (MVNshiftRLreg x y)
+(MVN (SRA x y)) -> (MVNshiftRAreg x y)
+
+(CMP x (SLLconst [c] y)) -> (CMPshiftLL x y [c])
+(CMP (SLLconst [c] y) x) -> (InvertFlags (CMPshiftLL x y [c]))
+(CMP x (SRLconst [c] y)) -> (CMPshiftRL x y [c])
+(CMP (SRLconst [c] y) x) -> (InvertFlags (CMPshiftRL x y [c]))
+(CMP x (SRAconst [c] y)) -> (CMPshiftRA x y [c])
+(CMP (SRAconst [c] y) x) -> (InvertFlags (CMPshiftRA x y [c]))
+(CMP x (SLL y z)) -> (CMPshiftLLreg x y z)
+(CMP (SLL y z) x) -> (InvertFlags (CMPshiftLLreg x y z))
+(CMP x (SRL y z)) -> (CMPshiftRLreg x y z)
+(CMP (SRL y z) x) -> (InvertFlags (CMPshiftRLreg x y z))
+(CMP x (SRA y z)) -> (CMPshiftRAreg x y z)
+(CMP (SRA y z) x) -> (InvertFlags (CMPshiftRAreg x y z))
+
+// prefer *const ops to *shift ops
+(ADDshiftLL (MOVWconst [c]) x [d]) -> (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVWconst [c]) x [d]) -> (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVWconst [c]) x [d]) -> (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ADCshiftLL (MOVWconst [c]) x [d] flags) -> (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
+(ADCshiftRL (MOVWconst [c]) x [d] flags) -> (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
+(ADCshiftRA (MOVWconst [c]) x [d] flags) -> (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
+(ADDSshiftLL (MOVWconst [c]) x [d]) -> (ADDSconst [c] (SLLconst <x.Type> x [d]))
+(ADDSshiftRL (MOVWconst [c]) x [d]) -> (ADDSconst [c] (SRLconst <x.Type> x [d]))
+(ADDSshiftRA (MOVWconst [c]) x [d]) -> (ADDSconst [c] (SRAconst <x.Type> x [d]))
+(SUBshiftLL (MOVWconst [c]) x [d]) -> (RSBconst [c] (SLLconst <x.Type> x [d]))
+(SUBshiftRL (MOVWconst [c]) x [d]) -> (RSBconst [c] (SRLconst <x.Type> x [d]))
+(SUBshiftRA (MOVWconst [c]) x [d]) -> (RSBconst [c] (SRAconst <x.Type> x [d]))
+(SBCshiftLL (MOVWconst [c]) x [d] flags) -> (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+(SBCshiftRL (MOVWconst [c]) x [d] flags) -> (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+(SBCshiftRA (MOVWconst [c]) x [d] flags) -> (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+(SUBSshiftLL (MOVWconst [c]) x [d]) -> (RSBSconst [c] (SLLconst <x.Type> x [d]))
+(SUBSshiftRL (MOVWconst [c]) x [d]) -> (RSBSconst [c] (SRLconst <x.Type> x [d]))
+(SUBSshiftRA (MOVWconst [c]) x [d]) -> (RSBSconst [c] (SRAconst <x.Type> x [d]))
+(RSBshiftLL (MOVWconst [c]) x [d]) -> (SUBconst [c] (SLLconst <x.Type> x [d]))
+(RSBshiftRL (MOVWconst [c]) x [d]) -> (SUBconst [c] (SRLconst <x.Type> x [d]))
+(RSBshiftRA (MOVWconst [c]) x [d]) -> (SUBconst [c] (SRAconst <x.Type> x [d]))
+(RSCshiftLL (MOVWconst [c]) x [d] flags) -> (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+(RSCshiftRL (MOVWconst [c]) x [d] flags) -> (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+(RSCshiftRA (MOVWconst [c]) x [d] flags) -> (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+(RSBSshiftLL (MOVWconst [c]) x [d]) -> (SUBSconst [c] (SLLconst <x.Type> x [d]))
+(RSBSshiftRL (MOVWconst [c]) x [d]) -> (SUBSconst [c] (SRLconst <x.Type> x [d]))
+(RSBSshiftRA (MOVWconst [c]) x [d]) -> (SUBSconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVWconst [c]) x [d]) -> (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVWconst [c]) x [d]) -> (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVWconst [c]) x [d]) -> (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftLL (MOVWconst [c]) x [d]) -> (ORconst [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL (MOVWconst [c]) x [d]) -> (ORconst [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA (MOVWconst [c]) x [d]) -> (ORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftLL (MOVWconst [c]) x [d]) -> (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVWconst [c]) x [d]) -> (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVWconst [c]) x [d]) -> (XORconst [c] (SRAconst <x.Type> x [d]))
+(CMPshiftLL (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+
+(ADDshiftLLreg (MOVWconst [c]) x y) -> (ADDconst [c] (SLL <x.Type> x y))
+(ADDshiftRLreg (MOVWconst [c]) x y) -> (ADDconst [c] (SRL <x.Type> x y))
+(ADDshiftRAreg (MOVWconst [c]) x y) -> (ADDconst [c] (SRA <x.Type> x y))
+(ADCshiftLLreg (MOVWconst [c]) x y flags) -> (ADCconst [c] (SLL <x.Type> x y) flags)
+(ADCshiftRLreg (MOVWconst [c]) x y flags) -> (ADCconst [c] (SRL <x.Type> x y) flags)
+(ADCshiftRAreg (MOVWconst [c]) x y flags) -> (ADCconst [c] (SRA <x.Type> x y) flags)
+(ADDSshiftLLreg (MOVWconst [c]) x y) -> (ADDSconst [c] (SLL <x.Type> x y))
+(ADDSshiftRLreg (MOVWconst [c]) x y) -> (ADDSconst [c] (SRL <x.Type> x y))
+(ADDSshiftRAreg (MOVWconst [c]) x y) -> (ADDSconst [c] (SRA <x.Type> x y))
+(SUBshiftLLreg (MOVWconst [c]) x y) -> (RSBconst [c] (SLL <x.Type> x y))
+(SUBshiftRLreg (MOVWconst [c]) x y) -> (RSBconst [c] (SRL <x.Type> x y))
+(SUBshiftRAreg (MOVWconst [c]) x y) -> (RSBconst [c] (SRA <x.Type> x y))
+(SBCshiftLLreg (MOVWconst [c]) x y flags) -> (RSCconst [c] (SLL <x.Type> x y) flags)
+(SBCshiftRLreg (MOVWconst [c]) x y flags) -> (RSCconst [c] (SRL <x.Type> x y) flags)
+(SBCshiftRAreg (MOVWconst [c]) x y flags) -> (RSCconst [c] (SRA <x.Type> x y) flags)
+(SUBSshiftLLreg (MOVWconst [c]) x y) -> (RSBSconst [c] (SLL <x.Type> x y))
+(SUBSshiftRLreg (MOVWconst [c]) x y) -> (RSBSconst [c] (SRL <x.Type> x y))
+(SUBSshiftRAreg (MOVWconst [c]) x y) -> (RSBSconst [c] (SRA <x.Type> x y))
+(RSBshiftLLreg (MOVWconst [c]) x y) -> (SUBconst [c] (SLL <x.Type> x y))
+(RSBshiftRLreg (MOVWconst [c]) x y) -> (SUBconst [c] (SRL <x.Type> x y))
+(RSBshiftRAreg (MOVWconst [c]) x y) -> (SUBconst [c] (SRA <x.Type> x y))
+(RSCshiftLLreg (MOVWconst [c]) x y flags) -> (SBCconst [c] (SLL <x.Type> x y) flags)
+(RSCshiftRLreg (MOVWconst [c]) x y flags) -> (SBCconst [c] (SRL <x.Type> x y) flags)
+(RSCshiftRAreg (MOVWconst [c]) x y flags) -> (SBCconst [c] (SRA <x.Type> x y) flags)
+(RSBSshiftLLreg (MOVWconst [c]) x y) -> (SUBSconst [c] (SLL <x.Type> x y))
+(RSBSshiftRLreg (MOVWconst [c]) x y) -> (SUBSconst [c] (SRL <x.Type> x y))
+(RSBSshiftRAreg (MOVWconst [c]) x y) -> (SUBSconst [c] (SRA <x.Type> x y))
+(ANDshiftLLreg (MOVWconst [c]) x y) -> (ANDconst [c] (SLL <x.Type> x y))
+(ANDshiftRLreg (MOVWconst [c]) x y) -> (ANDconst [c] (SRL <x.Type> x y))
+(ANDshiftRAreg (MOVWconst [c]) x y) -> (ANDconst [c] (SRA <x.Type> x y))
+(ORshiftLLreg (MOVWconst [c]) x y) -> (ORconst [c] (SLL <x.Type> x y))
+(ORshiftRLreg (MOVWconst [c]) x y) -> (ORconst [c] (SRL <x.Type> x y))
+(ORshiftRAreg (MOVWconst [c]) x y) -> (ORconst [c] (SRA <x.Type> x y))
+(XORshiftLLreg (MOVWconst [c]) x y) -> (XORconst [c] (SLL <x.Type> x y))
+(XORshiftRLreg (MOVWconst [c]) x y) -> (XORconst [c] (SRL <x.Type> x y))
+(XORshiftRAreg (MOVWconst [c]) x y) -> (XORconst [c] (SRA <x.Type> x y))
+(CMPshiftLLreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
+(CMPshiftRLreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
+(CMPshiftRAreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
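
The two groups above handle the case where the un-shifted operand of a shifted op is a constant c: the op is re-expressed as its immediate ("*const") form applied to a stand-alone shift, and the non-commutative ops swap to their reversed or flag-inverted counterparts (SUB to RSBconst, RSB to SUBconst, SBC to RSCconst, CMP to InvertFlags of CMPconst) so that c can sit in the immediate slot. A minimal, self-contained Go sketch of the arithmetic identities these rules rely on; the function names are illustrative only, not compiler APIs:

package main

import "fmt"

// c + (x<<s) is commutative, so (ADDshiftLLreg (MOVWconst [c]) x y) can become
// (ADDconst [c] (SLL <x.Type> x y)).
func addShiftLL(c, x uint32, s uint) uint32 { return c + x<<s }

// c - (x<<s) subtracts the shifted value *from* the constant, which is what the
// reverse-subtract form computes; hence (SUBshiftLLreg (MOVWconst [c]) x y)
// becomes (RSBconst [c] (SLL <x.Type> x y)), and RSB flips to SUBconst.
func subShiftLL(c, x uint32, s uint) uint32 { return c - x<<s }

func main() {
	c, x, s := uint32(100), uint32(3), uint(2)
	shifted := x << s                             // SLL x s
	fmt.Println(addShiftLL(c, x, s) == shifted+c) // true: ADDconst [c] shifted
	fmt.Println(subShiftLL(c, x, s) == c-shifted) // true: RSBconst [c] shifted
}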
+
+// constant folding in *shift ops
+(ADDshiftLL x (MOVWconst [c]) [d]) -> (ADDconst x [int64(uint32(c)<<uint64(d))])
+(ADDshiftRL x (MOVWconst [c]) [d]) -> (ADDconst x [int64(uint32(c)>>uint64(d))])
+(ADDshiftRA x (MOVWconst [c]) [d]) -> (ADDconst x [int64(int32(c)>>uint64(d))])
+(ADCshiftLL x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(uint32(c)<<uint64(d))] flags)
+(ADCshiftRL x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(uint32(c)>>uint64(d))] flags)
+(ADCshiftRA x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(int32(c)>>uint64(d))] flags)
+(ADDSshiftLL x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(uint32(c)<<uint64(d))])
+(ADDSshiftRL x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(uint32(c)>>uint64(d))])
+(ADDSshiftRA x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(int32(c)>>uint64(d))])
+(SUBshiftLL x (MOVWconst [c]) [d]) -> (SUBconst x [int64(uint32(c)<<uint64(d))])
+(SUBshiftRL x (MOVWconst [c]) [d]) -> (SUBconst x [int64(uint32(c)>>uint64(d))])
+(SUBshiftRA x (MOVWconst [c]) [d]) -> (SUBconst x [int64(int32(c)>>uint64(d))])
+(SBCshiftLL x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
+(SBCshiftRL x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
+(SBCshiftRA x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(int32(c)>>uint64(d))] flags)
+(SUBSshiftLL x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(uint32(c)<<uint64(d))])
+(SUBSshiftRL x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(uint32(c)>>uint64(d))])
+(SUBSshiftRA x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(int32(c)>>uint64(d))])
+(RSBshiftLL x (MOVWconst [c]) [d]) -> (RSBconst x [int64(uint32(c)<<uint64(d))])
+(RSBshiftRL x (MOVWconst [c]) [d]) -> (RSBconst x [int64(uint32(c)>>uint64(d))])
+(RSBshiftRA x (MOVWconst [c]) [d]) -> (RSBconst x [int64(int32(c)>>uint64(d))])
+(RSCshiftLL x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
+(RSCshiftRL x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
+(RSCshiftRA x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(int32(c)>>uint64(d))] flags)
+(RSBSshiftLL x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(uint32(c)<<uint64(d))])
+(RSBSshiftRL x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(uint32(c)>>uint64(d))])
+(RSBSshiftRA x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(int32(c)>>uint64(d))])
+(ANDshiftLL x (MOVWconst [c]) [d]) -> (ANDconst x [int64(uint32(c)<<uint64(d))])
+(ANDshiftRL x (MOVWconst [c]) [d]) -> (ANDconst x [int64(uint32(c)>>uint64(d))])
+(ANDshiftRA x (MOVWconst [c]) [d]) -> (ANDconst x [int64(int32(c)>>uint64(d))])
+(ORshiftLL x (MOVWconst [c]) [d]) -> (ORconst x [int64(uint32(c)<<uint64(d))])
+(ORshiftRL x (MOVWconst [c]) [d]) -> (ORconst x [int64(uint32(c)>>uint64(d))])
+(ORshiftRA x (MOVWconst [c]) [d]) -> (ORconst x [int64(int32(c)>>uint64(d))])
+(XORshiftLL x (MOVWconst [c]) [d]) -> (XORconst x [int64(uint32(c)<<uint64(d))])
+(XORshiftRL x (MOVWconst [c]) [d]) -> (XORconst x [int64(uint32(c)>>uint64(d))])
+(XORshiftRA x (MOVWconst [c]) [d]) -> (XORconst x [int64(int32(c)>>uint64(d))])
+(BICshiftLL x (MOVWconst [c]) [d]) -> (BICconst x [int64(uint32(c)<<uint64(d))])
+(BICshiftRL x (MOVWconst [c]) [d]) -> (BICconst x [int64(uint32(c)>>uint64(d))])
+(BICshiftRA x (MOVWconst [c]) [d]) -> (BICconst x [int64(int32(c)>>uint64(d))])
+(MVNshiftLL (MOVWconst [c]) [d]) -> (MOVWconst [^int64(uint32(c)<<uint64(d))])
+(MVNshiftRL (MOVWconst [c]) [d]) -> (MOVWconst [^int64(uint32(c)>>uint64(d))])
+(MVNshiftRA (MOVWconst [c]) [d]) -> (MOVWconst [^int64(int32(c)>>uint64(d))])
+(CMPshiftLL x (MOVWconst [c]) [d]) -> (CMPconst x [int64(uint32(c)<<uint64(d))])
+(CMPshiftRL x (MOVWconst [c]) [d]) -> (CMPconst x [int64(uint32(c)>>uint64(d))])
+(CMPshiftRA x (MOVWconst [c]) [d]) -> (CMPconst x [int64(int32(c)>>uint64(d))])
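
When the shifted operand itself is a constant, the shift is evaluated at compile time and folded into the op's auxInt; the only subtlety is that the RL rules fold with an unsigned 32-bit shift while the RA rules fold with a signed (arithmetic) one before widening back to int64. A small stand-alone sketch of exactly that computation; the foldShift* names are illustrative, not compiler functions:

package main

import "fmt"

// Mirrors the auxInt expressions in the rules above.
func foldShiftLL(c, d int64) int64 { return int64(uint32(c) << uint64(d)) }
func foldShiftRL(c, d int64) int64 { return int64(uint32(c) >> uint64(d)) } // logical shift
func foldShiftRA(c, d int64) int64 { return int64(int32(c) >> uint64(d)) }  // arithmetic shift

func main() {
	fmt.Println(foldShiftLL(3, 4))  // 48
	fmt.Println(foldShiftRL(-8, 1)) // 2147483644: c is zero-extended to 32 bits first
	fmt.Println(foldShiftRA(-8, 1)) // -4: the sign bit is preserved
}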
+
+(ADDshiftLLreg x y (MOVWconst [c])) -> (ADDshiftLL x y [c])
+(ADDshiftRLreg x y (MOVWconst [c])) -> (ADDshiftRL x y [c])
+(ADDshiftRAreg x y (MOVWconst [c])) -> (ADDshiftRA x y [c])
+(ADCshiftLLreg x y (MOVWconst [c]) flags) -> (ADCshiftLL x y [c] flags)
+(ADCshiftRLreg x y (MOVWconst [c]) flags) -> (ADCshiftRL x y [c] flags)
+(ADCshiftRAreg x y (MOVWconst [c]) flags) -> (ADCshiftRA x y [c] flags)
+(ADDSshiftLLreg x y (MOVWconst [c])) -> (ADDSshiftLL x y [c])
+(ADDSshiftRLreg x y (MOVWconst [c])) -> (ADDSshiftRL x y [c])
+(ADDSshiftRAreg x y (MOVWconst [c])) -> (ADDSshiftRA x y [c])
+(SUBshiftLLreg x y (MOVWconst [c])) -> (SUBshiftLL x y [c])
+(SUBshiftRLreg x y (MOVWconst [c])) -> (SUBshiftRL x y [c])
+(SUBshiftRAreg x y (MOVWconst [c])) -> (SUBshiftRA x y [c])
+(SBCshiftLLreg x y (MOVWconst [c]) flags) -> (SBCshiftLL x y [c] flags)
+(SBCshiftRLreg x y (MOVWconst [c]) flags) -> (SBCshiftRL x y [c] flags)
+(SBCshiftRAreg x y (MOVWconst [c]) flags) -> (SBCshiftRA x y [c] flags)
+(SUBSshiftLLreg x y (MOVWconst [c])) -> (SUBSshiftLL x y [c])
+(SUBSshiftRLreg x y (MOVWconst [c])) -> (SUBSshiftRL x y [c])
+(SUBSshiftRAreg x y (MOVWconst [c])) -> (SUBSshiftRA x y [c])
+(RSBshiftLLreg x y (MOVWconst [c])) -> (RSBshiftLL x y [c])
+(RSBshiftRLreg x y (MOVWconst [c])) -> (RSBshiftRL x y [c])
+(RSBshiftRAreg x y (MOVWconst [c])) -> (RSBshiftRA x y [c])
+(RSCshiftLLreg x y (MOVWconst [c]) flags) -> (RSCshiftLL x y [c] flags)
+(RSCshiftRLreg x y (MOVWconst [c]) flags) -> (RSCshiftRL x y [c] flags)
+(RSCshiftRAreg x y (MOVWconst [c]) flags) -> (RSCshiftRA x y [c] flags)
+(RSBSshiftLLreg x y (MOVWconst [c])) -> (RSBSshiftLL x y [c])
+(RSBSshiftRLreg x y (MOVWconst [c])) -> (RSBSshiftRL x y [c])
+(RSBSshiftRAreg x y (MOVWconst [c])) -> (RSBSshiftRA x y [c])
+(ANDshiftLLreg x y (MOVWconst [c])) -> (ANDshiftLL x y [c])
+(ANDshiftRLreg x y (MOVWconst [c])) -> (ANDshiftRL x y [c])
+(ANDshiftRAreg x y (MOVWconst [c])) -> (ANDshiftRA x y [c])
+(ORshiftLLreg x y (MOVWconst [c])) -> (ORshiftLL x y [c])
+(ORshiftRLreg x y (MOVWconst [c])) -> (ORshiftRL x y [c])
+(ORshiftRAreg x y (MOVWconst [c])) -> (ORshiftRA x y [c])
+(XORshiftLLreg x y (MOVWconst [c])) -> (XORshiftLL x y [c])
+(XORshiftRLreg x y (MOVWconst [c])) -> (XORshiftRL x y [c])
+(XORshiftRAreg x y (MOVWconst [c])) -> (XORshiftRA x y [c])
+(BICshiftLLreg x y (MOVWconst [c])) -> (BICshiftLL x y [c])
+(BICshiftRLreg x y (MOVWconst [c])) -> (BICshiftRL x y [c])
+(BICshiftRAreg x y (MOVWconst [c])) -> (BICshiftRA x y [c])
+(MVNshiftLLreg x (MOVWconst [c])) -> (MVNshiftLL x [c])
+(MVNshiftRLreg x (MOVWconst [c])) -> (MVNshiftRL x [c])
+(MVNshiftRAreg x (MOVWconst [c])) -> (MVNshiftRA x [c])
+(CMPshiftLLreg x y (MOVWconst [c])) -> (CMPshiftLL x y [c])
+(CMPshiftRLreg x y (MOVWconst [c])) -> (CMPshiftRL x y [c])
+(CMPshiftRAreg x y (MOVWconst [c])) -> (CMPshiftRA x y [c])
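
The block above collapses a register-shift op whose shift amount turns out to be a constant into the corresponding immediate-shift op, moving the amount into auxInt. A self-contained sketch of the shape of such a rewrite, using stand-in types rather than the real ones in cmd/compile/internal/ssa (where the actual rewrite is generated from this rules file):

package main

import "fmt"

// Minimal stand-ins, only to show the shape of
// (ADDshiftLLreg x y (MOVWconst [c])) -> (ADDshiftLL x y [c]).
type Op int

const (
	OpInvalid Op = iota
	OpMOVWconst
	OpADDshiftLL
	OpADDshiftLLreg
)

type Value struct {
	Op     Op
	AuxInt int64
	Args   []*Value
}

func (v *Value) reset(op Op) { v.Op, v.AuxInt, v.Args = op, 0, nil }

// rewriteADDshiftLLreg rewrites v in place when its shift amount is a constant.
func rewriteADDshiftLLreg(v *Value) bool {
	if v.Op != OpADDshiftLLreg {
		return false
	}
	x, y, amt := v.Args[0], v.Args[1], v.Args[2]
	if amt.Op != OpMOVWconst {
		return false
	}
	c := amt.AuxInt
	v.reset(OpADDshiftLL) // the amount moves into auxInt of the immediate form
	v.AuxInt = c
	v.Args = []*Value{x, y}
	return true
}

func main() {
	x, y := &Value{}, &Value{}
	amt := &Value{Op: OpMOVWconst, AuxInt: 2}
	v := &Value{Op: OpADDshiftLLreg, Args: []*Value{x, y, amt}}
	fmt.Println(rewriteADDshiftLLreg(v), v.Op == OpADDshiftLL, v.AuxInt) // true true 2
}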
+
+// use indexed loads and stores
+(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVWloadidx ptr idx mem)
+(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVWstoreidx ptr idx val mem)
+(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRA ptr idx [c] val mem)
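
These rules fire when an address is computed as ptr+idx, optionally with the index shifted, and the load or store has a zero offset and no symbol; the add and the shift then disappear into the addressing mode of a single MOVW. A minimal example of Go code whose element access produces that address shape; whether the surrounding loop and bounds-check transformations leave exactly this pattern is up to the rest of the compiler, so this is only the expected case:

package main

import "fmt"

// a[i] on a word-sized slice computes ptr + i*4, i.e. (ADDshiftLL ptr i [2])
// feeding a MOVWload with zero offset and no symbol, which is the pattern the
// rules above turn into a single shifted-index load.
func sum(a []uint32) uint32 {
	var s uint32
	for i := 0; i < len(a); i++ {
		s += a[i]
	}
	return s
}

func main() {
	fmt.Println(sum([]uint32{1, 2, 3, 4})) // 10
}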
+
+// constant folding in indexed loads and stores
+(MOVWloadidx ptr (MOVWconst [c]) mem) -> (MOVWload [c] ptr mem)
+(MOVWloadidx (MOVWconst [c]) ptr mem) -> (MOVWload [c] ptr mem)
+
+(MOVWstoreidx ptr (MOVWconst [c]) val mem) -> (MOVWstore [c] ptr val mem)
+(MOVWstoreidx (MOVWconst [c]) ptr val mem) -> (MOVWstore [c] ptr val mem)
+
+(MOVWloadidx ptr (SLLconst idx [c]) mem) -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWloadidx (SLLconst idx [c]) ptr mem) -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWloadidx ptr (SRLconst idx [c]) mem) -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWloadidx (SRLconst idx [c]) ptr mem) -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWloadidx ptr (SRAconst idx [c]) mem) -> (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWloadidx (SRAconst idx [c]) ptr mem) -> (MOVWloadshiftRA ptr idx [c] mem)
+
+(MOVWstoreidx ptr (SLLconst idx [c]) val mem) -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstoreidx (SLLconst idx [c]) ptr val mem) -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstoreidx ptr (SRLconst idx [c]) val mem) -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstoreidx (SRLconst idx [c]) ptr val mem) -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstoreidx ptr (SRAconst idx [c]) val mem) -> (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVWstoreidx (SRAconst idx [c]) ptr val mem) -> (MOVWstoreshiftRA ptr idx [c] val mem)
+
+(MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
+(MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
+(MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(int32(c)>>uint64(d))] ptr mem)
+
+(MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
+(MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
+(MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
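
Once an indexed load or store exists, a constant index folds back into the plain form's immediate offset, and an index that is itself a constant shift folds into the shifted-index form. An effective-address view of those folds; addrIdx and addrShiftLL are illustrative helpers, not compiler functions:

package main

import "fmt"

func addrIdx(ptr, idx uint32) uint32             { return ptr + idx }    // MOVWloadidx / MOVWstoreidx
func addrShiftLL(ptr, idx uint32, c uint) uint32 { return ptr + idx<<c } // MOVWloadshiftLL / MOVWstoreshiftLL

func main() {
	ptr := uint32(0x1000)
	fmt.Println(addrIdx(ptr, 8) == ptr+8)                     // a constant index becomes a plain MOVWload/MOVWstore offset
	fmt.Println(addrShiftLL(ptr, 3, 2) == addrIdx(ptr, 3<<2)) // an SLLconst index becomes the shifted-index form
}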
 
 // generic simplifications
 (ADD x (RSBconst [0] y)) -> (SUB x y)
+(ADD (RSBconst [0] y) x) -> (SUB x y)
 (SUB x x) -> (MOVWconst [0])
+(RSB x x) -> (MOVWconst [0])
 (AND x x) -> x
 (OR x x) -> x
 (XOR x x) -> (MOVWconst [0])
 (ADD a (MUL x y)) -> (MULA x y a)
 
 (AND x (MVN y)) -> (BIC x y)
+
+// simplification with *shift ops
+(SUBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(SUBshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(SUBshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(RSBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(RSBshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(RSBshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(ANDshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
+(ANDshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
+(ANDshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
+(ORshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
+(ORshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
+(ORshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
+(XORshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(XORshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(XORshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(BICshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(BICshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(BICshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(AND x (MVNshiftLL y [c])) -> (BICshiftLL x y [c])
+(AND x (MVNshiftRL y [c])) -> (BICshiftRL x y [c])
+(AND x (MVNshiftRA y [c])) -> (BICshiftRA x y [c])
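
The last group adds algebraic simplifications on the shifted forms themselves; the AND/MVN rules at the end rest on the identity that AND-ing with the complement of a shifted value is exactly the bit-clear (BIC) of that shifted value. A one-line check of that identity in plain Go:

package main

import "fmt"

func main() {
	x, y := uint32(0xF0F0F0F0), uint32(0x0000FFFF)
	c := uint(4)
	// (AND x (MVNshiftLL y [c])) -> (BICshiftLL x y [c]):
	// AND with the complement of a shifted value is Go's bit-clear of that value.
	fmt.Println(x & ^(y<<c) == x&^(y<<c)) // true
}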
index a58bdf8b58dfe4990c99728db5f63a7a2d48df91..89576daf0eda31ffdb77d12592c3bb423bbd4ad1 100644 (file)
@@ -110,8 +110,13 @@ func init() {
                gp2flags  = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{flags}}
                gp2flags1 = regInfo{inputs: []regMask{gp, gp, flags}, outputs: []regMask{gp}}
                gp31      = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+               gp31cf    = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}, clobbers: flags} // cf: clobbers flags
+               gp3flags  = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{flags}}
+               gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp, flags}, outputs: []regMask{gp}}
                gpload    = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
                gpstore   = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{}}
+               gp2load   = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+               gp2store  = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{}}
                fp01      = regInfo{inputs: []regMask{}, outputs: []regMask{fp}}
                fp11      = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
                fpgp      = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
@@ -176,14 +181,105 @@ func init() {
                {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
 
                // shifts
-               {name: "SLL", argLength: 2, reg: gp21cf, asm: "SLL"},                  // arg0 << arg1, results 0 for large shift
+               {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"},                    // arg0 << arg1, shift amount is mod 256
                {name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt
-               {name: "SRL", argLength: 2, reg: gp21cf, asm: "SRL"},                  // arg0 >> arg1, unsigned, results 0 for large shift
+               {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"},                    // arg0 >> arg1, unsigned, shift amount is mod 256
                {name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, unsigned
-               {name: "SRA", argLength: 2, reg: gp21cf, asm: "SRA"},                  // arg0 >> arg1, signed, results 0/-1 for large shift
+               {name: "SRA", argLength: 2, reg: gp21cf, asm: "SRA"},                  // arg0 >> arg1, signed, shift amount is mod 256
                {name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed
                {name: "SRRconst", argLength: 1, reg: gp11, aux: "Int32"},             // arg0 right rotate by auxInt bits
 
+               {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt
+               {name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift
+               {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift
+               {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1<<auxInt
+               {name: "SUBshiftRL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, unsigned shift
+               {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift
+               {name: "RSBshiftLL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1<<auxInt - arg0
+               {name: "RSBshiftRL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, unsigned shift
+               {name: "RSBshiftRA", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift
+               {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1<<auxInt)
+               {name: "ANDshiftRL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), unsigned shift
+               {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), signed shift
+               {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"},  // arg0 | arg1<<auxInt
+               {name: "ORshiftRL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"},  // arg0 | arg1>>auxInt, unsigned shift
+               {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"},  // arg0 | arg1>>auxInt, signed shift
+               {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1<<auxInt
+               {name: "XORshiftRL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, unsigned shift
+               {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, signed shift
+               {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1<<auxInt)
+               {name: "BICshiftRL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), unsigned shift
+               {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), signed shift
+               {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0<<auxInt)
+               {name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), unsigned shift
+               {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), signed shift
+
+               {name: "ADCshiftLL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1<<auxInt + carry, arg2=flags
+               {name: "ADCshiftRL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, unsigned shift, arg2=flags
+               {name: "ADCshiftRA", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, signed shift, arg2=flags
+               {name: "SBCshiftLL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1<<auxInt - carry, arg2=flags
+               {name: "SBCshiftRL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, unsigned shift, arg2=flags
+               {name: "SBCshiftRA", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, signed shift, arg2=flags
+               {name: "RSCshiftLL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1<<auxInt - arg0 - carry, arg2=flags
+               {name: "RSCshiftRL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, unsigned shift, arg2=flags
+               {name: "RSCshiftRA", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, signed shift, arg2=flags
+
+               {name: "ADDSshiftLL", argLength: 2, reg: gp21cf, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt, set carry flag
+               {name: "ADDSshiftRL", argLength: 2, reg: gp21cf, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift, set carry flag
+               {name: "ADDSshiftRA", argLength: 2, reg: gp21cf, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift, set carry flag
+               {name: "SUBSshiftLL", argLength: 2, reg: gp21cf, asm: "SUB", aux: "Int32"}, // arg0 - arg1<<auxInt, set carry flag
+               {name: "SUBSshiftRL", argLength: 2, reg: gp21cf, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, unsigned shift, set carry flag
+               {name: "SUBSshiftRA", argLength: 2, reg: gp21cf, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift, set carry flag
+               {name: "RSBSshiftLL", argLength: 2, reg: gp21cf, asm: "RSB", aux: "Int32"}, // arg1<<auxInt - arg0, set carry flag
+               {name: "RSBSshiftRL", argLength: 2, reg: gp21cf, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, unsigned shift, set carry flag
+               {name: "RSBSshiftRA", argLength: 2, reg: gp21cf, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift, set carry flag
+
+               {name: "ADDshiftLLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1<<arg2
+               {name: "ADDshiftRLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, unsigned shift
+               {name: "ADDshiftRAreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift
+               {name: "SUBshiftLLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1<<arg2
+               {name: "SUBshiftRLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, unsigned shift
+               {name: "SUBshiftRAreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift
+               {name: "RSBshiftLLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1<<arg2 - arg0
+               {name: "RSBshiftRLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, unsigned shift
+               {name: "RSBshiftRAreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift
+               {name: "ANDshiftLLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1<<arg2)
+               {name: "ANDshiftRLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), unsigned shift
+               {name: "ANDshiftRAreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), signed shift
+               {name: "ORshiftLLreg", argLength: 3, reg: gp31, asm: "ORR"},  // arg0 | arg1<<arg2
+               {name: "ORshiftRLreg", argLength: 3, reg: gp31, asm: "ORR"},  // arg0 | arg1>>arg2, unsigned shift
+               {name: "ORshiftRAreg", argLength: 3, reg: gp31, asm: "ORR"},  // arg0 | arg1>>arg2, signed shift
+               {name: "XORshiftLLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1<<arg2
+               {name: "XORshiftRLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, unsigned shift
+               {name: "XORshiftRAreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, signed shift
+               {name: "BICshiftLLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1<<arg2)
+               {name: "BICshiftRLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), unsigned shift
+               {name: "BICshiftRAreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), signed shift
+               {name: "MVNshiftLLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0<<arg1)
+               {name: "MVNshiftRLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), unsigned shift
+               {name: "MVNshiftRAreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), signed shift
+
+               {name: "ADCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1<<arg2 + carry, arg3=flags
+               {name: "ADCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, unsigned shift, arg3=flags
+               {name: "ADCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, signed shift, arg3=flags
+               {name: "SBCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1<<arg2 - carry, arg3=flags
+               {name: "SBCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, unsigned shift, arg3=flags
+               {name: "SBCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, signed shift, arg3=flags
+               {name: "RSCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1<<arg2 - arg0 - carry, arg3=flags
+               {name: "RSCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, unsigned shift, arg3=flags
+               {name: "RSCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, signed shift, arg3=flags
+
+               {name: "ADDSshiftLLreg", argLength: 3, reg: gp31cf, asm: "ADD"}, // arg0 + arg1<<arg2, set carry flag
+               {name: "ADDSshiftRLreg", argLength: 3, reg: gp31cf, asm: "ADD"}, // arg0 + arg1>>arg2, unsigned shift, set carry flag
+               {name: "ADDSshiftRAreg", argLength: 3, reg: gp31cf, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift, set carry flag
+               {name: "SUBSshiftLLreg", argLength: 3, reg: gp31cf, asm: "SUB"}, // arg0 - arg1<<arg2, set carry flag
+               {name: "SUBSshiftRLreg", argLength: 3, reg: gp31cf, asm: "SUB"}, // arg0 - arg1>>arg2, unsigned shift, set carry flag
+               {name: "SUBSshiftRAreg", argLength: 3, reg: gp31cf, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift, set carry flag
+               {name: "RSBSshiftLLreg", argLength: 3, reg: gp31cf, asm: "RSB"}, // arg1<<arg2 - arg0, set carry flag
+               {name: "RSBSshiftRLreg", argLength: 3, reg: gp31cf, asm: "RSB"}, // arg1>>arg2 - arg0, unsigned shift, set carry flag
+               {name: "RSBSshiftRAreg", argLength: 3, reg: gp31cf, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift, set carry flag
+
+               // comparisons
                {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"},                    // arg0 compare to arg1
                {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt
                {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags"},                    // arg0 compare to -arg1
@@ -195,6 +291,15 @@ func init() {
                {name: "CMPF", argLength: 2, reg: fp2flags, asm: "CMPF", typ: "Flags"},                  // arg0 compare to arg1, float32
                {name: "CMPD", argLength: 2, reg: fp2flags, asm: "CMPD", typ: "Flags"},                  // arg0 compare to arg1, float64
 
+               {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1<<auxInt
+               {name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift
+               {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift
+
+               {name: "CMPshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1<<arg2
+               {name: "CMPshiftRLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, unsigned shift
+               {name: "CMPshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, signed shift
+
+               // moves
                {name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", typ: "UInt32", rematerializeable: true},    // 32 low bits of auxint
                {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
                {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
@@ -215,6 +320,16 @@ func init() {
                {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux.  arg2=mem.
                {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg1 to arg0 + auxInt + aux.  arg2=mem.
 
+               {name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW"},                   // load from arg0 + arg1. arg2=mem
+               {name: "MOVWloadshiftLL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1<<auxInt. arg2=mem
+               {name: "MOVWloadshiftRL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1>>auxInt, unsigned shift. arg2=mem
+               {name: "MOVWloadshiftRA", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1>>auxInt, signed shift. arg2=mem
+
+               {name: "MOVWstoreidx", argLength: 4, reg: gp2store, asm: "MOVW"},                   // store arg2 to arg0 + arg1. arg3=mem
+               {name: "MOVWstoreshiftLL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32"}, // store arg2 to arg0 + arg1<<auxInt. arg3=mem
+               {name: "MOVWstoreshiftRL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32"}, // store arg2 to arg0 + arg1>>auxInt, unsigned shift. arg3=mem
+               {name: "MOVWstoreshiftRA", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32"}, // store arg2 to arg0 + arg1>>auxInt, signed shift. arg3=mem
+
                {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVBS"},  // move from arg0, sign-extended from byte
                {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
                {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVHS"},  // move from arg0, sign-extended from half
@@ -232,6 +347,12 @@ func init() {
                {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"},  // float32 -> float64
                {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"},  // float64 -> float32
 
+               // conditional instructions, for lowering shifts
+               {name: "CMOVWHSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates HS, arg1=flags
+               {name: "CMOVWLSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates LS, arg1=flags
+               {name: "SRAcond", argLength: 3, reg: gp2flags1, asm: "SRA"},                                         // arg0 >> 31 if flags indicates HS, arg0 >> arg1 otherwise, signed shift, arg2=flags
+
+               // function calls
                {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff"},                                             // call static function aux.(*gc.Sym).  arg0=mem, auxint=argsize, returns mem
                {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "Int64"}, // call function via closure.  arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
                {name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64"},                                               // call deferproc.  arg0=mem, auxint=argsize, returns mem
@@ -256,8 +377,6 @@ func init() {
                {name: "LoweredSelect0", argLength: 1, reg: regInfo{inputs: []regMask{}, outputs: []regMask{buildReg("R0")}}},           // the first component of a tuple, implicitly in R0, arg0=tuple
                {name: "LoweredSelect1", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // the second component of a tuple, arg0=tuple
 
-               {name: "LoweredZeromask", argLength: 1, reg: gp11}, // 0 if arg0 == 1, 0xffffffff if arg0 != 0
-
                // duffzero (must be 4-byte aligned)
                // arg0 = address of memory to zero (in R1, changed as side effect)
                // arg1 = value to store (always zero)
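
Among the op changes above, note that SLL and SRL no longer clobber flags (their comments now say the shift amount is taken mod 256), and the new conditional ops CMOVWHSconst, CMOVWLSconst and SRAcond exist to reconcile that with Go's shift semantics, where a shift by 32 or more must yield 0 (or all sign bits). A portable model of a uint32 right shift lowered along those lines; shr is an illustrative name, and the exact rule sequence lives elsewhere in the rules file, so this is a sketch of the idea rather than the actual lowering:

package main

import "fmt"

// Do the machine shift, compare the amount against 32, and conditionally
// replace the result, which is roughly what a CMPconst [32] followed by
// CMOVWHSconst [0] expresses.
func shr(x, s uint32) uint32 {
	r := uint32(uint64(x) >> (s % 256)) // SRL semantics: 0 once the (mod 256) amount reaches 32
	if s >= 32 {                        // HS: unsigned higher-or-same
		r = 0 // CMOVWHSconst [0]
	}
	return r
}

func main() {
	fmt.Println(shr(0xFFFFFFFF, 4) == 0x0FFFFFFF) // true
	fmt.Println(shr(0xFFFFFFFF, 40))              // 0, as Go requires for shift amounts >= 32
}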
index 07c439de4b2ab15ac153d6baba99a483ab75f12d..a7c2b6e3de5a7a4b0143d86e6be063396841d432 100644 (file)
@@ -157,7 +157,13 @@ func (op Op) isTupleGenerator() bool {
        switch op {
        case OpAdd32carry, OpSub32carry, OpMul32uhilo,
                OpARMADDS, OpARMSUBS, OpARMMULLU,
-               OpARMADDSconst, OpARMSUBSconst, OpARMRSBSconst:
+               OpARMADDSconst, OpARMSUBSconst, OpARMRSBSconst,
+               OpARMADDSshiftLL, OpARMSUBSshiftLL, OpARMRSBSshiftLL,
+               OpARMADDSshiftRL, OpARMSUBSshiftRL, OpARMRSBSshiftRL,
+               OpARMADDSshiftRA, OpARMSUBSshiftRA, OpARMRSBSshiftRA,
+               OpARMADDSshiftLLreg, OpARMSUBSshiftLLreg, OpARMRSBSshiftLLreg,
+               OpARMADDSshiftRLreg, OpARMSUBSshiftRLreg, OpARMRSBSshiftRLreg,
+               OpARMADDSshiftRAreg, OpARMSUBSshiftRAreg, OpARMRSBSshiftRAreg:
                return true
        }
        return false
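
The isTupleGenerator additions record that the flag-setting shifted ops (the ADDSshift*/SUBSshift*/RSBSshift* variants) produce two results, a 32-bit value and the carry flag, just like ADDS and SUBS already in this switch; the ADCshift*/SBCshift*/RSCshift* ops then consume that carry. That pairing is what implements 64-bit arithmetic on 32-bit ARM (see OpAdd32carry and OpSub32carry above). A portable model of the low/high word split; add64 is an illustrative helper, not compiler code:

package main

import "fmt"

// A 64-bit add is an ADDS on the low words (value plus carry flag, hence the
// tuple) followed by an ADC on the high words that consumes the carry.
func add64(xlo, xhi, ylo, yhi uint32) (lo, hi uint32) {
	lo = xlo + ylo // ADDS: low word
	var carry uint32
	if lo < xlo { // unsigned overflow of the low word sets the carry
		carry = 1
	}
	hi = xhi + yhi + carry // ADC: high word plus carry
	return
}

func main() {
	lo, hi := add64(0xFFFFFFFF, 0, 1, 0)
	fmt.Println(lo, hi) // 0 1
}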
index 7ea67a99b952b66b1ca11e47555456aeb5e51e02..159e1b26b419afa6f4b977f583cb6a0132cb10f5 100644 (file)
@@ -596,6 +596,90 @@ const (
        OpARMSRA
        OpARMSRAconst
        OpARMSRRconst
+       OpARMADDshiftLL
+       OpARMADDshiftRL
+       OpARMADDshiftRA
+       OpARMSUBshiftLL
+       OpARMSUBshiftRL
+       OpARMSUBshiftRA
+       OpARMRSBshiftLL
+       OpARMRSBshiftRL
+       OpARMRSBshiftRA
+       OpARMANDshiftLL
+       OpARMANDshiftRL
+       OpARMANDshiftRA
+       OpARMORshiftLL
+       OpARMORshiftRL
+       OpARMORshiftRA
+       OpARMXORshiftLL
+       OpARMXORshiftRL
+       OpARMXORshiftRA
+       OpARMBICshiftLL
+       OpARMBICshiftRL
+       OpARMBICshiftRA
+       OpARMMVNshiftLL
+       OpARMMVNshiftRL
+       OpARMMVNshiftRA
+       OpARMADCshiftLL
+       OpARMADCshiftRL
+       OpARMADCshiftRA
+       OpARMSBCshiftLL
+       OpARMSBCshiftRL
+       OpARMSBCshiftRA
+       OpARMRSCshiftLL
+       OpARMRSCshiftRL
+       OpARMRSCshiftRA
+       OpARMADDSshiftLL
+       OpARMADDSshiftRL
+       OpARMADDSshiftRA
+       OpARMSUBSshiftLL
+       OpARMSUBSshiftRL
+       OpARMSUBSshiftRA
+       OpARMRSBSshiftLL
+       OpARMRSBSshiftRL
+       OpARMRSBSshiftRA
+       OpARMADDshiftLLreg
+       OpARMADDshiftRLreg
+       OpARMADDshiftRAreg
+       OpARMSUBshiftLLreg
+       OpARMSUBshiftRLreg
+       OpARMSUBshiftRAreg
+       OpARMRSBshiftLLreg
+       OpARMRSBshiftRLreg
+       OpARMRSBshiftRAreg
+       OpARMANDshiftLLreg
+       OpARMANDshiftRLreg
+       OpARMANDshiftRAreg
+       OpARMORshiftLLreg
+       OpARMORshiftRLreg
+       OpARMORshiftRAreg
+       OpARMXORshiftLLreg
+       OpARMXORshiftRLreg
+       OpARMXORshiftRAreg
+       OpARMBICshiftLLreg
+       OpARMBICshiftRLreg
+       OpARMBICshiftRAreg
+       OpARMMVNshiftLLreg
+       OpARMMVNshiftRLreg
+       OpARMMVNshiftRAreg
+       OpARMADCshiftLLreg
+       OpARMADCshiftRLreg
+       OpARMADCshiftRAreg
+       OpARMSBCshiftLLreg
+       OpARMSBCshiftRLreg
+       OpARMSBCshiftRAreg
+       OpARMRSCshiftLLreg
+       OpARMRSCshiftRLreg
+       OpARMRSCshiftRAreg
+       OpARMADDSshiftLLreg
+       OpARMADDSshiftRLreg
+       OpARMADDSshiftRAreg
+       OpARMSUBSshiftLLreg
+       OpARMSUBSshiftRLreg
+       OpARMSUBSshiftRAreg
+       OpARMRSBSshiftLLreg
+       OpARMRSBSshiftRLreg
+       OpARMRSBSshiftRAreg
        OpARMCMP
        OpARMCMPconst
        OpARMCMN
@@ -606,6 +690,12 @@ const (
        OpARMTEQconst
        OpARMCMPF
        OpARMCMPD
+       OpARMCMPshiftLL
+       OpARMCMPshiftRL
+       OpARMCMPshiftRA
+       OpARMCMPshiftLLreg
+       OpARMCMPshiftRLreg
+       OpARMCMPshiftRAreg
        OpARMMOVWconst
        OpARMMOVFconst
        OpARMMOVDconst
@@ -622,6 +712,14 @@ const (
        OpARMMOVWstore
        OpARMMOVFstore
        OpARMMOVDstore
+       OpARMMOVWloadidx
+       OpARMMOVWloadshiftLL
+       OpARMMOVWloadshiftRL
+       OpARMMOVWloadshiftRA
+       OpARMMOVWstoreidx
+       OpARMMOVWstoreshiftLL
+       OpARMMOVWstoreshiftRL
+       OpARMMOVWstoreshiftRA
        OpARMMOVBreg
        OpARMMOVBUreg
        OpARMMOVHreg
@@ -637,6 +735,9 @@ const (
        OpARMMOVDWU
        OpARMMOVFD
        OpARMMOVDF
+       OpARMCMOVWHSconst
+       OpARMCMOVWLSconst
+       OpARMSRAcond
        OpARMCALLstatic
        OpARMCALLclosure
        OpARMCALLdefer
@@ -656,7 +757,6 @@ const (
        OpARMCarry
        OpARMLoweredSelect0
        OpARMLoweredSelect1
-       OpARMLoweredZeromask
        OpARMDUFFZERO
        OpARMDUFFCOPY
        OpARMLoweredZero
@@ -7155,7 +7255,6 @@ var opcodeTable = [...]opInfo{
                                {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
                                {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
                        },
-                       clobbers: 4294967296, // FLAGS
                        outputs: []regMask{
                                5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
                        },
@@ -7184,7 +7283,6 @@ var opcodeTable = [...]opInfo{
                                {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
                                {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
                        },
-                       clobbers: 4294967296, // FLAGS
                        outputs: []regMask{
                                5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
                        },
@@ -7220,27 +7318,1317 @@ var opcodeTable = [...]opInfo{
                },
        },
        {
-               name:    "SRAconst",
-               auxType: auxInt32,
-               argLen:  1,
-               asm:     arm.ASRA,
+               name:    "SRAconst",
+               auxType: auxInt32,
+               argLen:  1,
+               asm:     arm.ASRA,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "SRRconst",
+               auxType: auxInt32,
+               argLen:  1,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ADDshiftLL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AADD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ADDshiftRL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AADD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ADDshiftRA",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AADD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "SUBshiftLL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ASUB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "SUBshiftRL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ASUB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "SUBshiftRA",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ASUB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "RSBshiftLL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ARSB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "RSBshiftRL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ARSB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "RSBshiftRA",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ARSB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ANDshiftLL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AAND,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ANDshiftRL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AAND,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ANDshiftRA",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AAND,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ORshiftLL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AORR,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ORshiftRL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AORR,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ORshiftRA",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AORR,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "XORshiftLL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AEOR,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "XORshiftRL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AEOR,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "XORshiftRA",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AEOR,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "BICshiftLL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ABIC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "BICshiftRL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ABIC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "BICshiftRA",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ABIC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "MVNshiftLL",
+               auxType: auxInt32,
+               argLen:  1,
+               asm:     arm.AMVN,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "MVNshiftRL",
+               auxType: auxInt32,
+               argLen:  1,
+               asm:     arm.AMVN,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "MVNshiftRA",
+               auxType: auxInt32,
+               argLen:  1,
+               asm:     arm.AMVN,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ADCshiftLL",
+               auxType: auxInt32,
+               argLen:  3,
+               asm:     arm.AADC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {2, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ADCshiftRL",
+               auxType: auxInt32,
+               argLen:  3,
+               asm:     arm.AADC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {2, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ADCshiftRA",
+               auxType: auxInt32,
+               argLen:  3,
+               asm:     arm.AADC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {2, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "SBCshiftLL",
+               auxType: auxInt32,
+               argLen:  3,
+               asm:     arm.ASBC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {2, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "SBCshiftRL",
+               auxType: auxInt32,
+               argLen:  3,
+               asm:     arm.ASBC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {2, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "SBCshiftRA",
+               auxType: auxInt32,
+               argLen:  3,
+               asm:     arm.ASBC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {2, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "RSCshiftLL",
+               auxType: auxInt32,
+               argLen:  3,
+               asm:     arm.ARSC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {2, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "RSCshiftRL",
+               auxType: auxInt32,
+               argLen:  3,
+               asm:     arm.ARSC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {2, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "RSCshiftRA",
+               auxType: auxInt32,
+               argLen:  3,
+               asm:     arm.ARSC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {2, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ADDSshiftLL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AADD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ADDSshiftRL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AADD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "ADDSshiftRA",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.AADD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "SUBSshiftLL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ASUB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "SUBSshiftRL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ASUB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "SUBSshiftRA",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ASUB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "RSBSshiftLL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ARSB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "RSBSshiftRL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ARSB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "RSBSshiftRA",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ARSB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ADDshiftLLreg",
+               argLen: 3,
+               asm:    arm.AADD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ADDshiftRLreg",
+               argLen: 3,
+               asm:    arm.AADD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ADDshiftRAreg",
+               argLen: 3,
+               asm:    arm.AADD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "SUBshiftLLreg",
+               argLen: 3,
+               asm:    arm.ASUB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "SUBshiftRLreg",
+               argLen: 3,
+               asm:    arm.ASUB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "SUBshiftRAreg",
+               argLen: 3,
+               asm:    arm.ASUB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "RSBshiftLLreg",
+               argLen: 3,
+               asm:    arm.ARSB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "RSBshiftRLreg",
+               argLen: 3,
+               asm:    arm.ARSB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "RSBshiftRAreg",
+               argLen: 3,
+               asm:    arm.ARSB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ANDshiftLLreg",
+               argLen: 3,
+               asm:    arm.AAND,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ANDshiftRLreg",
+               argLen: 3,
+               asm:    arm.AAND,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ANDshiftRAreg",
+               argLen: 3,
+               asm:    arm.AAND,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ORshiftLLreg",
+               argLen: 3,
+               asm:    arm.AORR,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ORshiftRLreg",
+               argLen: 3,
+               asm:    arm.AORR,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ORshiftRAreg",
+               argLen: 3,
+               asm:    arm.AORR,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "XORshiftLLreg",
+               argLen: 3,
+               asm:    arm.AEOR,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "XORshiftRLreg",
+               argLen: 3,
+               asm:    arm.AEOR,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "XORshiftRAreg",
+               argLen: 3,
+               asm:    arm.AEOR,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "BICshiftLLreg",
+               argLen: 3,
+               asm:    arm.ABIC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "BICshiftRLreg",
+               argLen: 3,
+               asm:    arm.ABIC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "BICshiftRAreg",
+               argLen: 3,
+               asm:    arm.ABIC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "MVNshiftLLreg",
+               argLen: 2,
+               asm:    arm.AMVN,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "MVNshiftRLreg",
+               argLen: 2,
+               asm:    arm.AMVN,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "MVNshiftRAreg",
+               argLen: 2,
+               asm:    arm.AMVN,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ADCshiftLLreg",
+               argLen: 4,
+               asm:    arm.AADC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {3, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ADCshiftRLreg",
+               argLen: 4,
+               asm:    arm.AADC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {3, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ADCshiftRAreg",
+               argLen: 4,
+               asm:    arm.AADC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {3, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "SBCshiftLLreg",
+               argLen: 4,
+               asm:    arm.ASBC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {3, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "SBCshiftRLreg",
+               argLen: 4,
+               asm:    arm.ASBC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {3, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "SBCshiftRAreg",
+               argLen: 4,
+               asm:    arm.ASBC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {3, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "RSCshiftLLreg",
+               argLen: 4,
+               asm:    arm.ARSC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {3, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "RSCshiftRLreg",
+               argLen: 4,
+               asm:    arm.ARSC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {3, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "RSCshiftRAreg",
+               argLen: 4,
+               asm:    arm.ARSC,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {3, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ADDSshiftLLreg",
+               argLen: 3,
+               asm:    arm.AADD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ADDSshiftRLreg",
+               argLen: 3,
+               asm:    arm.AADD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "ADDSshiftRAreg",
+               argLen: 3,
+               asm:    arm.AADD,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "SUBSshiftLLreg",
+               argLen: 3,
+               asm:    arm.ASUB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "SUBSshiftRLreg",
+               argLen: 3,
+               asm:    arm.ASUB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "SUBSshiftRAreg",
+               argLen: 3,
+               asm:    arm.ASUB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "RSBSshiftLLreg",
+               argLen: 3,
+               asm:    arm.ARSB,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       clobbers: 4294967296, // FLAGS
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "RSBSshiftRLreg",
+               argLen: 3,
+               asm:    arm.ARSB,
                reg: regInfo{
                        inputs: []inputInfo{
-                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
                        },
+                       clobbers: 4294967296, // FLAGS
                        outputs: []regMask{
                                5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
                        },
                },
        },
        {
-               name:    "SRRconst",
-               auxType: auxInt32,
-               argLen:  1,
+               name:   "RSBSshiftRAreg",
+               argLen: 3,
+               asm:    arm.ARSB,
                reg: regInfo{
                        inputs: []inputInfo{
-                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
                        },
+                       clobbers: 4294967296, // FLAGS
                        outputs: []regMask{
                                5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
                        },
@@ -7388,6 +8776,96 @@ var opcodeTable = [...]opInfo{
                        },
                },
        },
+       {
+               name:    "CMPshiftLL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ACMP,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               4294967296, // FLAGS
+                       },
+               },
+       },
+       {
+               name:    "CMPshiftRL",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ACMP,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               4294967296, // FLAGS
+                       },
+               },
+       },
+       {
+               name:    "CMPshiftRA",
+               auxType: auxInt32,
+               argLen:  2,
+               asm:     arm.ACMP,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                       },
+                       outputs: []regMask{
+                               4294967296, // FLAGS
+                       },
+               },
+       },
+       {
+               name:   "CMPshiftLLreg",
+               argLen: 3,
+               asm:    arm.ACMP,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               4294967296, // FLAGS
+                       },
+               },
+       },
+       {
+               name:   "CMPshiftRLreg",
+               argLen: 3,
+               asm:    arm.ACMP,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               4294967296, // FLAGS
+                       },
+               },
+       },
+       {
+               name:   "CMPshiftRAreg",
+               argLen: 3,
+               asm:    arm.ACMP,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               4294967296, // FLAGS
+                       },
+               },
+       },
        {
                name:              "MOVWconst",
                auxType:           auxInt32,
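
A quick orientation to the op names in the entries above, inferred from the table itself: the LL/RL/RA suffixes denote a logical-left, logical-right, and arithmetic-right shift of the shifted operand; the plain shift forms carry the shift amount as a 32-bit aux constant (auxType: auxInt32), while the corresponding "reg" forms drop the aux field and take the shift amount from an extra register argument (hence the larger argLen). The flag-setting forms (ADDSshift*, SUBSshift*, RSBSshift*) clobber FLAGS, the carry-consuming forms (ADCshift*, SBCshift*, RSCshift*) take FLAGS as their last input, and the CMPshift* variants produce FLAGS as their only output. A minimal sketch of the three shift kinds, for reference only:

        // Sketch of the LL/RL/RA shift kinds as ordinary Go operations;
        // the names are illustrative and not part of the CL.
        func shiftLL(x uint32, c uint) uint32 { return x << c } // logical shift left
        func shiftRL(x uint32, c uint) uint32 { return x >> c } // logical shift right
        func shiftRA(x int32, c uint) int32   { return x >> c } // arithmetic shift right
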
@@ -7597,6 +9075,116 @@ var opcodeTable = [...]opInfo{
                        },
                },
        },
+       {
+               name:   "MOVWloadidx",
+               argLen: 3,
+               asm:    arm.AMOVW,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 6143},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "MOVWloadshiftLL",
+               auxType: auxInt32,
+               argLen:  3,
+               asm:     arm.AMOVW,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 6143},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "MOVWloadshiftRL",
+               auxType: auxInt32,
+               argLen:  3,
+               asm:     arm.AMOVW,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 6143},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:    "MOVWloadshiftRA",
+               auxType: auxInt32,
+               argLen:  3,
+               asm:     arm.AMOVW,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 6143},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "MOVWstoreidx",
+               argLen: 4,
+               asm:    arm.AMOVW,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 6143},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {2, 6143},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+                       },
+               },
+       },
+       {
+               name:    "MOVWstoreshiftLL",
+               auxType: auxInt32,
+               argLen:  4,
+               asm:     arm.AMOVW,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 6143},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {2, 6143},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+                       },
+               },
+       },
+       {
+               name:    "MOVWstoreshiftRL",
+               auxType: auxInt32,
+               argLen:  4,
+               asm:     arm.AMOVW,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 6143},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {2, 6143},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+                       },
+               },
+       },
+       {
+               name:    "MOVWstoreshiftRA",
+               auxType: auxInt32,
+               argLen:  4,
+               asm:     arm.AMOVW,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 6143},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {2, 6143},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+                               {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+                       },
+               },
+       },
        {
                name:   "MOVBreg",
                argLen: 1,
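
The MOVWloadidx/MOVWstoreidx and MOVWloadshift*/MOVWstoreshift* entries above add register-indexed and shifted-index addressing for word loads and stores. Judging only from the operand lists, arg 0 is the base pointer (its mask also admits SP and SB), arg 1 is the index register, stores take the value as arg 2, the final argument is memory, and the shift variants scale the index by the aux constant. A rough model of the effective-address arithmetic, under those assumptions:

        // Assumed addressing arithmetic only; the real ops work on SSA values,
        // not raw pointers, and these helper names are hypothetical.
        func idxAddr(base, index uintptr) uintptr             { return base + index }    // MOVWloadidx / MOVWstoreidx
        func shiftLLAddr(base, index uintptr, c uint) uintptr { return base + index<<c } // ...shiftLL: index scaled left
        func shiftRLAddr(base, index uintptr, c uint) uintptr { return base + index>>c } // ...shiftRL: index shifted right (logical)
        func shiftRAAddr(base, index uintptr, c uint) uintptr { // ...shiftRA: index shifted right (arithmetic)
                return base + uintptr(int32(index)>>c)
        }
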
@@ -7792,6 +9380,53 @@ var opcodeTable = [...]opInfo{
                        },
                },
        },
+       {
+               name:         "CMOVWHSconst",
+               auxType:      auxInt32,
+               argLen:       2,
+               resultInArg0: true,
+               asm:          arm.AMOVW,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:         "CMOVWLSconst",
+               auxType:      auxInt32,
+               argLen:       2,
+               resultInArg0: true,
+               asm:          arm.AMOVW,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {1, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
+       {
+               name:   "SRAcond",
+               argLen: 3,
+               asm:    arm.ASRA,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {2, 4294967296}, // FLAGS
+                               {0, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                               {1, 5119},       // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+                       outputs: []regMask{
+                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+                       },
+               },
+       },
        {
                name:    "CALLstatic",
                auxType: auxSymOff,
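
CMOVWHSconst and CMOVWLSconst above are the conditional moves: both read FLAGS, carry their constant in auxInt32, and are marked resultInArg0, so presumably the result is the constant when the condition holds (HS = unsigned higher-or-same, LS = unsigned lower-or-same) and the unchanged first argument otherwise; SRAcond likewise feeds FLAGS into an arithmetic right shift. A sketch of that assumed semantics:

        // Assumed semantics, inferred from the op names and operand lists above;
        // not taken from the compiler source.
        func cmovWHSconst(arg0, c int32, hs bool) int32 {
                if hs { // unsigned >= result carried in FLAGS
                        return c
                }
                return arg0
        }
        func cmovWLSconst(arg0, c int32, ls bool) int32 {
                if ls { // unsigned <= result carried in FLAGS
                        return c
                }
                return arg0
        }
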
@@ -7999,18 +9634,6 @@ var opcodeTable = [...]opInfo{
                        },
                },
        },
-       {
-               name:   "LoweredZeromask",
-               argLen: 1,
-               reg: regInfo{
-                       inputs: []inputInfo{
-                               {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
-                       },
-                       outputs: []regMask{
-                               5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
-                       },
-               },
-       },
        {
                name:    "DUFFZERO",
                auxType: auxInt64,
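
The numeric register masks repeated throughout this table are plain bitmasks over the ARM register file, as the generated comments spell out: bit i stands for Ri (with g at bit 10 and R11 apparently kept out of allocation), SP at bit 13, FLAGS at bit 32 and SB at bit 33. So 5119 is R0-R9 plus R12, 6143 additionally admits g, 4294967296 is FLAGS alone, and 8589948927 adds SP and SB. A small self-contained decoder, with the name table inferred from those comments rather than taken from the compiler:

        package main

        import "fmt"

        // maskNames expands a regMask bit pattern into register names, using a
        // name table inferred from the comments in this table (an assumption,
        // not the compiler's own definition).
        func maskNames(m uint64) []string {
                low := []string{"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
                        "R8", "R9", "g", "R11", "R12", "SP", "R14", "R15"}
                var out []string
                for i := uint(0); i < 64; i++ {
                        if m&(1<<i) == 0 {
                                continue
                        }
                        switch {
                        case int(i) < len(low):
                                out = append(out, low[i])
                        case i == 32:
                                out = append(out, "FLAGS")
                        case i == 33:
                                out = append(out, "SB")
                        default:
                                out = append(out, fmt.Sprintf("bit%d", i))
                        }
                }
                return out
        }

        func main() {
                // Prints the register sets for the masks that appear above.
                for _, m := range []uint64{5119, 6143, 4294967296, 8589948927} {
                        fmt.Println(m, maskNames(m))
                }
        }
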
index c36976c9539ca770ae85e1dbef20252735671091..ceac5839efe0c6b4bb833aefeca7411e22979a14 100644 (file)
@@ -12,16 +12,64 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMADC(v, config)
        case OpARMADCconst:
                return rewriteValueARM_OpARMADCconst(v, config)
+       case OpARMADCshiftLL:
+               return rewriteValueARM_OpARMADCshiftLL(v, config)
+       case OpARMADCshiftLLreg:
+               return rewriteValueARM_OpARMADCshiftLLreg(v, config)
+       case OpARMADCshiftRA:
+               return rewriteValueARM_OpARMADCshiftRA(v, config)
+       case OpARMADCshiftRAreg:
+               return rewriteValueARM_OpARMADCshiftRAreg(v, config)
+       case OpARMADCshiftRL:
+               return rewriteValueARM_OpARMADCshiftRL(v, config)
+       case OpARMADCshiftRLreg:
+               return rewriteValueARM_OpARMADCshiftRLreg(v, config)
        case OpARMADD:
                return rewriteValueARM_OpARMADD(v, config)
        case OpARMADDS:
                return rewriteValueARM_OpARMADDS(v, config)
+       case OpARMADDSshiftLL:
+               return rewriteValueARM_OpARMADDSshiftLL(v, config)
+       case OpARMADDSshiftLLreg:
+               return rewriteValueARM_OpARMADDSshiftLLreg(v, config)
+       case OpARMADDSshiftRA:
+               return rewriteValueARM_OpARMADDSshiftRA(v, config)
+       case OpARMADDSshiftRAreg:
+               return rewriteValueARM_OpARMADDSshiftRAreg(v, config)
+       case OpARMADDSshiftRL:
+               return rewriteValueARM_OpARMADDSshiftRL(v, config)
+       case OpARMADDSshiftRLreg:
+               return rewriteValueARM_OpARMADDSshiftRLreg(v, config)
        case OpARMADDconst:
                return rewriteValueARM_OpARMADDconst(v, config)
+       case OpARMADDshiftLL:
+               return rewriteValueARM_OpARMADDshiftLL(v, config)
+       case OpARMADDshiftLLreg:
+               return rewriteValueARM_OpARMADDshiftLLreg(v, config)
+       case OpARMADDshiftRA:
+               return rewriteValueARM_OpARMADDshiftRA(v, config)
+       case OpARMADDshiftRAreg:
+               return rewriteValueARM_OpARMADDshiftRAreg(v, config)
+       case OpARMADDshiftRL:
+               return rewriteValueARM_OpARMADDshiftRL(v, config)
+       case OpARMADDshiftRLreg:
+               return rewriteValueARM_OpARMADDshiftRLreg(v, config)
        case OpARMAND:
                return rewriteValueARM_OpARMAND(v, config)
        case OpARMANDconst:
                return rewriteValueARM_OpARMANDconst(v, config)
+       case OpARMANDshiftLL:
+               return rewriteValueARM_OpARMANDshiftLL(v, config)
+       case OpARMANDshiftLLreg:
+               return rewriteValueARM_OpARMANDshiftLLreg(v, config)
+       case OpARMANDshiftRA:
+               return rewriteValueARM_OpARMANDshiftRA(v, config)
+       case OpARMANDshiftRAreg:
+               return rewriteValueARM_OpARMANDshiftRAreg(v, config)
+       case OpARMANDshiftRL:
+               return rewriteValueARM_OpARMANDshiftRL(v, config)
+       case OpARMANDshiftRLreg:
+               return rewriteValueARM_OpARMANDshiftRLreg(v, config)
        case OpAdd16:
                return rewriteValueARM_OpAdd16(v, config)
        case OpAdd32:
@@ -52,10 +100,38 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMBIC(v, config)
        case OpARMBICconst:
                return rewriteValueARM_OpARMBICconst(v, config)
+       case OpARMBICshiftLL:
+               return rewriteValueARM_OpARMBICshiftLL(v, config)
+       case OpARMBICshiftLLreg:
+               return rewriteValueARM_OpARMBICshiftLLreg(v, config)
+       case OpARMBICshiftRA:
+               return rewriteValueARM_OpARMBICshiftRA(v, config)
+       case OpARMBICshiftRAreg:
+               return rewriteValueARM_OpARMBICshiftRAreg(v, config)
+       case OpARMBICshiftRL:
+               return rewriteValueARM_OpARMBICshiftRL(v, config)
+       case OpARMBICshiftRLreg:
+               return rewriteValueARM_OpARMBICshiftRLreg(v, config)
+       case OpARMCMOVWHSconst:
+               return rewriteValueARM_OpARMCMOVWHSconst(v, config)
+       case OpARMCMOVWLSconst:
+               return rewriteValueARM_OpARMCMOVWLSconst(v, config)
        case OpARMCMP:
                return rewriteValueARM_OpARMCMP(v, config)
        case OpARMCMPconst:
                return rewriteValueARM_OpARMCMPconst(v, config)
+       case OpARMCMPshiftLL:
+               return rewriteValueARM_OpARMCMPshiftLL(v, config)
+       case OpARMCMPshiftLLreg:
+               return rewriteValueARM_OpARMCMPshiftLLreg(v, config)
+       case OpARMCMPshiftRA:
+               return rewriteValueARM_OpARMCMPshiftRA(v, config)
+       case OpARMCMPshiftRAreg:
+               return rewriteValueARM_OpARMCMPshiftRAreg(v, config)
+       case OpARMCMPshiftRL:
+               return rewriteValueARM_OpARMCMPshiftRL(v, config)
+       case OpARMCMPshiftRLreg:
+               return rewriteValueARM_OpARMCMPshiftRLreg(v, config)
        case OpClosureCall:
                return rewriteValueARM_OpClosureCall(v, config)
        case OpCom16:
@@ -244,8 +320,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMLessThanU(v, config)
        case OpLoad:
                return rewriteValueARM_OpLoad(v, config)
-       case OpARMLoweredZeromask:
-               return rewriteValueARM_OpARMLoweredZeromask(v, config)
        case OpLrot16:
                return rewriteValueARM_OpLrot16(v, config)
        case OpLrot32:
@@ -306,14 +380,44 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMMOVHstore(v, config)
        case OpARMMOVWload:
                return rewriteValueARM_OpARMMOVWload(v, config)
+       case OpARMMOVWloadidx:
+               return rewriteValueARM_OpARMMOVWloadidx(v, config)
+       case OpARMMOVWloadshiftLL:
+               return rewriteValueARM_OpARMMOVWloadshiftLL(v, config)
+       case OpARMMOVWloadshiftRA:
+               return rewriteValueARM_OpARMMOVWloadshiftRA(v, config)
+       case OpARMMOVWloadshiftRL:
+               return rewriteValueARM_OpARMMOVWloadshiftRL(v, config)
+       case OpARMMOVWreg:
+               return rewriteValueARM_OpARMMOVWreg(v, config)
        case OpARMMOVWstore:
                return rewriteValueARM_OpARMMOVWstore(v, config)
+       case OpARMMOVWstoreidx:
+               return rewriteValueARM_OpARMMOVWstoreidx(v, config)
+       case OpARMMOVWstoreshiftLL:
+               return rewriteValueARM_OpARMMOVWstoreshiftLL(v, config)
+       case OpARMMOVWstoreshiftRA:
+               return rewriteValueARM_OpARMMOVWstoreshiftRA(v, config)
+       case OpARMMOVWstoreshiftRL:
+               return rewriteValueARM_OpARMMOVWstoreshiftRL(v, config)
        case OpARMMUL:
                return rewriteValueARM_OpARMMUL(v, config)
        case OpARMMULA:
                return rewriteValueARM_OpARMMULA(v, config)
        case OpARMMVN:
                return rewriteValueARM_OpARMMVN(v, config)
+       case OpARMMVNshiftLL:
+               return rewriteValueARM_OpARMMVNshiftLL(v, config)
+       case OpARMMVNshiftLLreg:
+               return rewriteValueARM_OpARMMVNshiftLLreg(v, config)
+       case OpARMMVNshiftRA:
+               return rewriteValueARM_OpARMMVNshiftRA(v, config)
+       case OpARMMVNshiftRAreg:
+               return rewriteValueARM_OpARMMVNshiftRAreg(v, config)
+       case OpARMMVNshiftRL:
+               return rewriteValueARM_OpARMMVNshiftRL(v, config)
+       case OpARMMVNshiftRLreg:
+               return rewriteValueARM_OpARMMVNshiftRLreg(v, config)
        case OpMod16:
                return rewriteValueARM_OpMod16(v, config)
        case OpMod16u:
@@ -374,6 +478,18 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMOR(v, config)
        case OpARMORconst:
                return rewriteValueARM_OpARMORconst(v, config)
+       case OpARMORshiftLL:
+               return rewriteValueARM_OpARMORshiftLL(v, config)
+       case OpARMORshiftLLreg:
+               return rewriteValueARM_OpARMORshiftLLreg(v, config)
+       case OpARMORshiftRA:
+               return rewriteValueARM_OpARMORshiftRA(v, config)
+       case OpARMORshiftRAreg:
+               return rewriteValueARM_OpARMORshiftRAreg(v, config)
+       case OpARMORshiftRL:
+               return rewriteValueARM_OpARMORshiftRL(v, config)
+       case OpARMORshiftRLreg:
+               return rewriteValueARM_OpARMORshiftRLreg(v, config)
        case OpOffPtr:
                return rewriteValueARM_OpOffPtr(v, config)
        case OpOr16:
@@ -386,10 +502,46 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpOrB(v, config)
        case OpARMRSB:
                return rewriteValueARM_OpARMRSB(v, config)
+       case OpARMRSBSshiftLL:
+               return rewriteValueARM_OpARMRSBSshiftLL(v, config)
+       case OpARMRSBSshiftLLreg:
+               return rewriteValueARM_OpARMRSBSshiftLLreg(v, config)
+       case OpARMRSBSshiftRA:
+               return rewriteValueARM_OpARMRSBSshiftRA(v, config)
+       case OpARMRSBSshiftRAreg:
+               return rewriteValueARM_OpARMRSBSshiftRAreg(v, config)
+       case OpARMRSBSshiftRL:
+               return rewriteValueARM_OpARMRSBSshiftRL(v, config)
+       case OpARMRSBSshiftRLreg:
+               return rewriteValueARM_OpARMRSBSshiftRLreg(v, config)
        case OpARMRSBconst:
                return rewriteValueARM_OpARMRSBconst(v, config)
+       case OpARMRSBshiftLL:
+               return rewriteValueARM_OpARMRSBshiftLL(v, config)
+       case OpARMRSBshiftLLreg:
+               return rewriteValueARM_OpARMRSBshiftLLreg(v, config)
+       case OpARMRSBshiftRA:
+               return rewriteValueARM_OpARMRSBshiftRA(v, config)
+       case OpARMRSBshiftRAreg:
+               return rewriteValueARM_OpARMRSBshiftRAreg(v, config)
+       case OpARMRSBshiftRL:
+               return rewriteValueARM_OpARMRSBshiftRL(v, config)
+       case OpARMRSBshiftRLreg:
+               return rewriteValueARM_OpARMRSBshiftRLreg(v, config)
        case OpARMRSCconst:
                return rewriteValueARM_OpARMRSCconst(v, config)
+       case OpARMRSCshiftLL:
+               return rewriteValueARM_OpARMRSCshiftLL(v, config)
+       case OpARMRSCshiftLLreg:
+               return rewriteValueARM_OpARMRSCshiftLLreg(v, config)
+       case OpARMRSCshiftRA:
+               return rewriteValueARM_OpARMRSCshiftRA(v, config)
+       case OpARMRSCshiftRAreg:
+               return rewriteValueARM_OpARMRSCshiftRAreg(v, config)
+       case OpARMRSCshiftRL:
+               return rewriteValueARM_OpARMRSCshiftRL(v, config)
+       case OpARMRSCshiftRLreg:
+               return rewriteValueARM_OpARMRSCshiftRLreg(v, config)
        case OpRsh16Ux16:
                return rewriteValueARM_OpRsh16Ux16(v, config)
        case OpRsh16Ux32:
@@ -442,12 +594,26 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMSBC(v, config)
        case OpARMSBCconst:
                return rewriteValueARM_OpARMSBCconst(v, config)
+       case OpARMSBCshiftLL:
+               return rewriteValueARM_OpARMSBCshiftLL(v, config)
+       case OpARMSBCshiftLLreg:
+               return rewriteValueARM_OpARMSBCshiftLLreg(v, config)
+       case OpARMSBCshiftRA:
+               return rewriteValueARM_OpARMSBCshiftRA(v, config)
+       case OpARMSBCshiftRAreg:
+               return rewriteValueARM_OpARMSBCshiftRAreg(v, config)
+       case OpARMSBCshiftRL:
+               return rewriteValueARM_OpARMSBCshiftRL(v, config)
+       case OpARMSBCshiftRLreg:
+               return rewriteValueARM_OpARMSBCshiftRLreg(v, config)
        case OpARMSLL:
                return rewriteValueARM_OpARMSLL(v, config)
        case OpARMSLLconst:
                return rewriteValueARM_OpARMSLLconst(v, config)
        case OpARMSRA:
                return rewriteValueARM_OpARMSRA(v, config)
+       case OpARMSRAcond:
+               return rewriteValueARM_OpARMSRAcond(v, config)
        case OpARMSRAconst:
                return rewriteValueARM_OpARMSRAconst(v, config)
        case OpARMSRL:
@@ -458,8 +624,32 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMSUB(v, config)
        case OpARMSUBS:
                return rewriteValueARM_OpARMSUBS(v, config)
+       case OpARMSUBSshiftLL:
+               return rewriteValueARM_OpARMSUBSshiftLL(v, config)
+       case OpARMSUBSshiftLLreg:
+               return rewriteValueARM_OpARMSUBSshiftLLreg(v, config)
+       case OpARMSUBSshiftRA:
+               return rewriteValueARM_OpARMSUBSshiftRA(v, config)
+       case OpARMSUBSshiftRAreg:
+               return rewriteValueARM_OpARMSUBSshiftRAreg(v, config)
+       case OpARMSUBSshiftRL:
+               return rewriteValueARM_OpARMSUBSshiftRL(v, config)
+       case OpARMSUBSshiftRLreg:
+               return rewriteValueARM_OpARMSUBSshiftRLreg(v, config)
        case OpARMSUBconst:
                return rewriteValueARM_OpARMSUBconst(v, config)
+       case OpARMSUBshiftLL:
+               return rewriteValueARM_OpARMSUBshiftLL(v, config)
+       case OpARMSUBshiftLLreg:
+               return rewriteValueARM_OpARMSUBshiftLLreg(v, config)
+       case OpARMSUBshiftRA:
+               return rewriteValueARM_OpARMSUBshiftRA(v, config)
+       case OpARMSUBshiftRAreg:
+               return rewriteValueARM_OpARMSUBshiftRAreg(v, config)
+       case OpARMSUBshiftRL:
+               return rewriteValueARM_OpARMSUBshiftRL(v, config)
+       case OpARMSUBshiftRLreg:
+               return rewriteValueARM_OpARMSUBshiftRLreg(v, config)
        case OpSelect0:
                return rewriteValueARM_OpSelect0(v, config)
        case OpSelect1:
@@ -504,6 +694,18 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMXOR(v, config)
        case OpARMXORconst:
                return rewriteValueARM_OpARMXORconst(v, config)
+       case OpARMXORshiftLL:
+               return rewriteValueARM_OpARMXORshiftLL(v, config)
+       case OpARMXORshiftLLreg:
+               return rewriteValueARM_OpARMXORshiftLLreg(v, config)
+       case OpARMXORshiftRA:
+               return rewriteValueARM_OpARMXORshiftRA(v, config)
+       case OpARMXORshiftRAreg:
+               return rewriteValueARM_OpARMXORshiftRAreg(v, config)
+       case OpARMXORshiftRL:
+               return rewriteValueARM_OpARMXORshiftRL(v, config)
+       case OpARMXORshiftRLreg:
+               return rewriteValueARM_OpARMXORshiftRLreg(v, config)
        case OpXor16:
                return rewriteValueARM_OpXor16(v, config)
        case OpXor32:
@@ -560,222 +762,242 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
                v.AddArg(flags)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMADCconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ADCconst [c] (ADDconst [d] x) flags)
+       // match: (ADC x (SLLconst [c] y) flags)
        // cond:
-       // result: (ADCconst [int64(int32(c+d))] x flags)
+       // result: (ADCshiftLL x y [c] flags)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               flags := v.Args[1]
-               v.reset(OpARMADCconst)
-               v.AuxInt = int64(int32(c + d))
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftLL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                v.AddArg(flags)
                return true
        }
-       // match: (ADCconst [c] (SUBconst [d] x) flags)
+       // match: (ADC (SLLconst [c] y) x flags)
        // cond:
-       // result: (ADCconst [int64(int32(c-d))] x flags)
+       // result: (ADCshiftLL x y [c] flags)
        for {
-               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSUBconst {
+               if v_0.Op != OpARMSLLconst {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               flags := v.Args[1]
-               v.reset(OpARMADCconst)
-               v.AuxInt = int64(int32(c - d))
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftLL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                v.AddArg(flags)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ADD (MOVWconst [c]) x)
+       // match: (ADC x (SRLconst [c] y) flags)
        // cond:
-       // result: (ADDconst [c] x)
+       // result: (ADCshiftRL x y [c] flags)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpARMADDconst)
-               v.AuxInt = c
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftRL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (ADD x (MOVWconst [c]))
+       // match: (ADC (SRLconst [c] y) x flags)
        // cond:
-       // result: (ADDconst [c] x)
+       // result: (ADCshiftRL x y [c] flags)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpARMADDconst)
-               v.AuxInt = c
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftRL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (ADD x (RSBconst [0] y))
+       // match: (ADC x (SRAconst [c] y) flags)
        // cond:
-       // result: (SUB x y)
+       // result: (ADCshiftRA x y [c] flags)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMRSBconst {
-                       break
-               }
-               if v_1.AuxInt != 0 {
+               if v_1.Op != OpARMSRAconst {
                        break
                }
+               c := v_1.AuxInt
                y := v_1.Args[0]
-               v.reset(OpARMSUB)
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftRA)
                v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (ADD (MUL x y) a)
+       // match: (ADC (SRAconst [c] y) x flags)
        // cond:
-       // result: (MULA x y a)
+       // result: (ADCshiftRA x y [c] flags)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMUL {
+               if v_0.Op != OpARMSRAconst {
                        break
                }
-               x := v_0.Args[0]
-               y := v_0.Args[1]
-               a := v.Args[1]
-               v.reset(OpARMMULA)
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftRA)
                v.AddArg(x)
                v.AddArg(y)
-               v.AddArg(a)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (ADD a (MUL x y))
+       // match: (ADC x (SLL y z) flags)
        // cond:
-       // result: (MULA x y a)
+       // result: (ADCshiftLLreg x y z flags)
        for {
-               a := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMUL {
+               if v_1.Op != OpARMSLL {
                        break
                }
-               x := v_1.Args[0]
-               y := v_1.Args[1]
-               v.reset(OpARMMULA)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftLLreg)
                v.AddArg(x)
                v.AddArg(y)
-               v.AddArg(a)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ADDS (MOVWconst [c]) x)
+       // match: (ADC (SLL y z) x flags)
        // cond:
-       // result: (ADDSconst [c] x)
+       // result: (ADCshiftLLreg x y z flags)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSLL {
                        break
                }
-               c := v_0.AuxInt
+               y := v_0.Args[0]
+               z := v_0.Args[1]
                x := v.Args[1]
-               v.reset(OpARMADDSconst)
-               v.AuxInt = c
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftLLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       // match: (ADDS x (MOVWconst [c]))
+       // match: (ADC x (SRL y z) flags)
        // cond:
-       // result: (ADDSconst [c] x)
+       // result: (ADCshiftRLreg x y z flags)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMSRL {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpARMADDSconst)
-               v.AuxInt = c
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftRLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMADDconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
+       // match: (ADC (SRL y z) x flags)
        // cond:
-       // result: (MOVWaddr [off1+off2] {sym} ptr)
+       // result: (ADCshiftRLreg x y z flags)
        for {
-               off1 := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               if v_0.Op != OpARMSRL {
                        break
                }
-               off2 := v_0.AuxInt
-               sym := v_0.Aux
-               ptr := v_0.Args[0]
-               v.reset(OpARMMOVWaddr)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       // match: (ADDconst [0] x)
+       // match: (ADC x (SRA y z) flags)
        // cond:
-       // result: x
+       // result: (ADCshiftRAreg x y z flags)
        for {
-               if v.AuxInt != 0 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftRAreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       // match: (ADDconst [c] (MOVWconst [d]))
+       // match: (ADC (SRA y z) x flags)
        // cond:
-       // result: (MOVWconst [int64(int32(c+d))])
+       // result: (ADCshiftRAreg x y z flags)
        for {
-               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSRA {
                        break
                }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int32(c + d))
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       // match: (ADDconst [c] (ADDconst [d] x))
+       return false
+}
+func rewriteValueARM_OpARMADCconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ADCconst [c] (ADDconst [d] x) flags)
        // cond:
-       // result: (ADDconst [int64(int32(c+d))] x)
+       // result: (ADCconst [int64(int32(c+d))] x flags)
        for {
                c := v.AuxInt
                v_0 := v.Args[0]
@@ -784,14 +1006,16 @@ func rewriteValueARM_OpARMADDconst(v *Value, config *Config) bool {
                }
                d := v_0.AuxInt
                x := v_0.Args[0]
-               v.reset(OpARMADDconst)
+               flags := v.Args[1]
+               v.reset(OpARMADCconst)
                v.AuxInt = int64(int32(c + d))
                v.AddArg(x)
+               v.AddArg(flags)
                return true
        }
-       // match: (ADDconst [c] (SUBconst [d] x))
+       // match: (ADCconst [c] (SUBconst [d] x) flags)
        // cond:
-       // result: (ADDconst [int64(int32(c-d))] x)
+       // result: (ADCconst [int64(int32(c-d))] x flags)
        for {
                c := v.AuxInt
                v_0 := v.Args[0]
@@ -800,35 +1024,21 @@ func rewriteValueARM_OpARMADDconst(v *Value, config *Config) bool {
                }
                d := v_0.AuxInt
                x := v_0.Args[0]
-               v.reset(OpARMADDconst)
+               flags := v.Args[1]
+               v.reset(OpARMADCconst)
                v.AuxInt = int64(int32(c - d))
                v.AddArg(x)
-               return true
-       }
-       // match: (ADDconst [c] (RSBconst [d] x))
-       // cond:
-       // result: (RSBconst [int64(int32(c+d))] x)
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMRSBconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = int64(int32(c + d))
-               v.AddArg(x)
+               v.AddArg(flags)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADCshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (AND (MOVWconst [c]) x)
+       // match: (ADCshiftLL (MOVWconst [c]) x [d] flags)
        // cond:
-       // result: (ANDconst [c] x)
+       // result: (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -836,14 +1046,20 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               v.reset(OpARMANDconst)
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMADCconst)
                v.AuxInt = c
-               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-       // match: (AND x (MOVWconst [c]))
+       // match: (ADCshiftLL x (MOVWconst [c]) [d] flags)
        // cond:
-       // result: (ANDconst [c] x)
+       // result: (ADCconst x [int64(uint32(c)<<uint64(d))] flags)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -851,897 +1067,1153 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
                        break
                }
                c := v_1.AuxInt
-               v.reset(OpARMANDconst)
-               v.AuxInt = c
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMADCconst)
                v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(flags)
                return true
        }
-       // match: (AND x x)
+       return false
+}
+func rewriteValueARM_OpARMADCshiftLLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ADCshiftLLreg (MOVWconst [c]) x y flags)
        // cond:
-       // result: x
+       // result: (ADCconst [c] (SLL <x.Type> x y) flags)
        for {
-               x := v.Args[0]
-               if x != v.Args[1] {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMADCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-       // match: (AND x (MVN y))
+       // match: (ADCshiftLLreg x y (MOVWconst [c]) flags)
        // cond:
-       // result: (BIC x y)
+       // result: (ADCshiftLL x y [c] flags)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMVN {
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(OpARMBIC)
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMADCshiftLL)
                v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMANDconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADCshiftRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ANDconst [0] _)
+       // match: (ADCshiftRA (MOVWconst [c]) x [d] flags)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
        for {
-               if v.AuxInt != 0 {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMADCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-       // match: (ANDconst [c] x)
-       // cond: int32(c)==-1
-       // result: x
+       // match: (ADCshiftRA x (MOVWconst [c]) [d] flags)
+       // cond:
+       // result: (ADCconst x [int64(int32(c)>>uint64(d))] flags)
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               if !(int32(c) == -1) {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               c := v_1.AuxInt
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMADCconst)
                v.AddArg(x)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(flags)
                return true
        }
-       // match: (ANDconst [c] (MOVWconst [d]))
+       return false
+}
+func rewriteValueARM_OpARMADCshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ADCshiftRAreg (MOVWconst [c]) x y flags)
        // cond:
-       // result: (MOVWconst [c&d])
+       // result: (ADCconst [c] (SRA <x.Type> x y) flags)
        for {
-               c := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = c & d
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMADCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-       // match: (ANDconst [c] (ANDconst [d] x))
+       // match: (ADCshiftRAreg x y (MOVWconst [c]) flags)
        // cond:
-       // result: (ANDconst [c&d] x)
+       // result: (ADCshiftRA x y [c] flags)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMANDconst {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMANDconst)
-               v.AuxInt = c & d
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMADCshiftRA)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
        return false
 }
-func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADCshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add16 x y)
+       // match: (ADCshiftRL (MOVWconst [c]) x [d] flags)
        // cond:
-       // result: (ADD x y)
+       // result: (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMADD)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMADCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-}
-func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32 x y)
+       // match: (ADCshiftRL x (MOVWconst [c]) [d] flags)
        // cond:
-       // result: (ADD x y)
+       // result: (ADCconst x [int64(uint32(c)>>uint64(d))] flags)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMADD)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMADCconst)
                v.AddArg(x)
-               v.AddArg(y)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(flags)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADCshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add32F x y)
+       // match: (ADCshiftRLreg (MOVWconst [c]) x y flags)
        // cond:
-       // result: (ADDF x y)
+       // result: (ADCconst [c] (SRL <x.Type> x y) flags)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMADDF)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMADCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-}
-func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32carry x y)
+       // match: (ADCshiftRLreg x y (MOVWconst [c]) flags)
        // cond:
-       // result: (ADDS x y)
+       // result: (ADCshiftRL x y [c] flags)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMADDS)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMADCshiftRL)
                v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add32withcarry x y c)
+       // match: (ADD (MOVWconst [c]) x)
        // cond:
-       // result: (ADC x y c)
+       // result: (ADDconst [c] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               c := v.Args[2]
-               v.reset(OpARMADC)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMADDconst)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(c)
                return true
        }
-}
-func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add64F x y)
+       // match: (ADD x (MOVWconst [c]))
        // cond:
-       // result: (ADDD x y)
+       // result: (ADDconst [c] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMADDD)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMADDconst)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add8 x y)
+       // match: (ADD x (SLLconst [c] y))
        // cond:
-       // result: (ADD x y)
+       // result: (ADDshiftLL x y [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMADD)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMADDshiftLL)
                v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (AddPtr x y)
+       // match: (ADD (SLLconst [c] y) x)
        // cond:
-       // result: (ADD x y)
+       // result: (ADDshiftLL x y [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMADD)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMADDshiftLL)
                v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Addr {sym} base)
+       // match: (ADD x (SRLconst [c] y))
        // cond:
-       // result: (MOVWaddr {sym} base)
+       // result: (ADDshiftRL x y [c])
        for {
-               sym := v.Aux
-               base := v.Args[0]
-               v.reset(OpARMMOVWaddr)
-               v.Aux = sym
-               v.AddArg(base)
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMADDshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And16 x y)
+       // match: (ADD (SRLconst [c] y) x)
        // cond:
-       // result: (AND x y)
+       // result: (ADDshiftRL x y [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMAND)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMADDshiftRL)
                v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And32 x y)
+       // match: (ADD x (SRAconst [c] y))
        // cond:
-       // result: (AND x y)
+       // result: (ADDshiftRA x y [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMAND)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMADDshiftRA)
                v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And8 x y)
+       // match: (ADD (SRAconst [c] y) x)
        // cond:
-       // result: (AND x y)
+       // result: (ADDshiftRA x y [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMAND)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMADDshiftRA)
                v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (AndB x y)
+       // match: (ADD x (SLL y z))
        // cond:
-       // result: (AND x y)
+       // result: (ADDshiftLLreg x y z)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMAND)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMADDshiftLLreg)
                v.AddArg(x)
                v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (BIC x (MOVWconst [c]))
+       // match: (ADD (SLL y z) x)
        // cond:
-       // result: (BICconst [c] x)
+       // result: (ADDshiftLLreg x y z)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLL {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpARMBICconst)
-               v.AuxInt = c
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMADDshiftLLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (BIC x x)
+       // match: (ADD x (SRL y z))
        // cond:
-       // result: (MOVWconst [0])
+       // result: (ADDshiftRLreg x y z)
        for {
                x := v.Args[0]
-               if x != v.Args[1] {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMADDshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMBICconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (BICconst [0] x)
+       // match: (ADD (SRL y z) x)
        // cond:
-       // result: x
+       // result: (ADDshiftRLreg x y z)
        for {
-               if v.AuxInt != 0 {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRL {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMADDshiftRLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (BICconst [c] _)
-       // cond: int32(c)==-1
-       // result: (MOVWconst [0])
+       // match: (ADD x (SRA y z))
+       // cond:
+       // result: (ADDshiftRAreg x y z)
        for {
-               c := v.AuxInt
-               if !(int32(c) == -1) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMADDshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (BICconst [c] (MOVWconst [d]))
+       // match: (ADD (SRA y z) x)
        // cond:
-       // result: (MOVWconst [d&^c])
+       // result: (ADDshiftRAreg x y z)
        for {
-               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSRA {
                        break
                }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = d &^ c
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMADDshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (CMP x (MOVWconst [c]))
+       // match: (ADD x (RSBconst [0] y))
        // cond:
-       // result: (CMPconst [c] x)
+       // result: (SUB x y)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMRSBconst {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpARMCMPconst)
-               v.AuxInt = c
+               if v_1.AuxInt != 0 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpARMSUB)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMP (MOVWconst [c]) x)
+       // match: (ADD (RSBconst [0] y) x)
        // cond:
-       // result: (InvertFlags (CMPconst [c] x))
+       // result: (SUB x y)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMRSBconst {
                        break
                }
-               c := v_0.AuxInt
+               if v_0.AuxInt != 0 {
+                       break
+               }
+               y := v_0.Args[0]
                x := v.Args[1]
-               v.reset(OpARMInvertFlags)
-               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v0.AuxInt = c
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(OpARMSUB)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (CMPconst (MOVWconst [x]) [y])
-       // cond: int32(x)==int32(y)
-       // result: (FlagEQ)
+       // match: (ADD (MUL x y) a)
+       // cond:
+       // result: (MULA x y a)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int32(x) == int32(y)) {
+               if v_0.Op != OpARMMUL {
                        break
                }
-               v.reset(OpARMFlagEQ)
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               a := v.Args[1]
+               v.reset(OpARMMULA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(a)
                return true
        }
-       // match: (CMPconst (MOVWconst [x]) [y])
-       // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
-       // result: (FlagLT_ULT)
+       // match: (ADD a (MUL x y))
+       // cond:
+       // result: (MULA x y a)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
+               a := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMUL {
                        break
                }
-               v.reset(OpARMFlagLT_ULT)
+               x := v_1.Args[0]
+               y := v_1.Args[1]
+               v.reset(OpARMMULA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(a)
                return true
        }
-       // match: (CMPconst (MOVWconst [x]) [y])
-       // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
-       // result: (FlagLT_UGT)
+       return false
+}
+func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ADDS (MOVWconst [c]) x)
+       // cond:
+       // result: (ADDSconst [c] x)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
-                       break
-               }
-               v.reset(OpARMFlagLT_UGT)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMADDSconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (CMPconst (MOVWconst [x]) [y])
-       // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
-       // result: (FlagGT_ULT)
+       // match: (ADDS x (MOVWconst [c]))
+       // cond:
+       // result: (ADDSconst [c] x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMFlagGT_ULT)
+               c := v_1.AuxInt
+               v.reset(OpARMADDSconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (CMPconst (MOVWconst [x]) [y])
-       // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
-       // result: (FlagGT_UGT)
+       // match: (ADDS x (SLLconst [c] y))
+       // cond:
+       // result: (ADDSshiftLL x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
                        break
                }
-               v.reset(OpARMFlagGT_UGT)
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMADDSshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (CMPconst (MOVBUreg _) [c])
-       // cond: 0xff < c
-       // result: (FlagLT_ULT)
+       // match: (ADDS (SLLconst [c] y) x)
+       // cond:
+       // result: (ADDSshiftLL x y [c])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVBUreg {
-                       break
-               }
-               c := v.AuxInt
-               if !(0xff < c) {
+               if v_0.Op != OpARMSLLconst {
                        break
                }
-               v.reset(OpARMFlagLT_ULT)
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMADDSshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (CMPconst (MOVHUreg _) [c])
-       // cond: 0xffff < c
-       // result: (FlagLT_ULT)
+       // match: (ADDS x (SRLconst [c] y))
+       // cond:
+       // result: (ADDSshiftRL x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVHUreg {
-                       break
-               }
-               c := v.AuxInt
-               if !(0xffff < c) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMFlagLT_ULT)
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMADDSshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (CMPconst (ANDconst _ [m]) [n])
-       // cond: 0 <= int32(m) && int32(m) < int32(n)
-       // result: (FlagLT_ULT)
+       // match: (ADDS (SRLconst [c] y) x)
+       // cond:
+       // result: (ADDSshiftRL x y [c])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMANDconst {
+               if v_0.Op != OpARMSRLconst {
                        break
                }
-               m := v_0.AuxInt
-               n := v.AuxInt
-               if !(0 <= int32(m) && int32(m) < int32(n)) {
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMADDSshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (ADDS x (SRAconst [c] y))
+       // cond:
+       // result: (ADDSshiftRA x y [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
                        break
                }
-               v.reset(OpARMFlagLT_ULT)
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMADDSshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (CMPconst (SRLconst _ [c]) [n])
-       // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)
-       // result: (FlagLT_ULT)
+       // match: (ADDS (SRAconst [c] y) x)
+       // cond:
+       // result: (ADDSshiftRA x y [c])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSRLconst {
+               if v_0.Op != OpARMSRAconst {
                        break
                }
                c := v_0.AuxInt
-               n := v.AuxInt
-               if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) {
-                       break
-               }
-               v.reset(OpARMFlagLT_ULT)
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMADDSshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       return false
-}
-func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ClosureCall [argwid] entry closure mem)
+       // match: (ADDS x (SLL y z))
        // cond:
-       // result: (CALLclosure [argwid] entry closure mem)
+       // result: (ADDSshiftLLreg x y z)
        for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               closure := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMCALLclosure)
-               v.AuxInt = argwid
-               v.AddArg(entry)
-               v.AddArg(closure)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMADDSshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com16 x)
+       // match: (ADDS (SLL y z) x)
        // cond:
-       // result: (MVN x)
+       // result: (ADDSshiftLLreg x y z)
        for {
-               x := v.Args[0]
-               v.reset(OpARMMVN)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMADDSshiftLLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com32 x)
+       // match: (ADDS x (SRL y z))
        // cond:
-       // result: (MVN x)
+       // result: (ADDSshiftRLreg x y z)
        for {
                x := v.Args[0]
-               v.reset(OpARMMVN)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMADDSshiftRLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com8 x)
+       // match: (ADDS (SRL y z) x)
        // cond:
-       // result: (MVN x)
+       // result: (ADDSshiftRLreg x y z)
        for {
-               x := v.Args[0]
-               v.reset(OpARMMVN)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMADDSshiftRLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const16 [val])
+       // match: (ADDS x (SRA y z))
        // cond:
-       // result: (MOVWconst [val])
+       // result: (ADDSshiftRAreg x y z)
        for {
-               val := v.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = val
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMADDSshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const32 [val])
+       // match: (ADDS (SRA y z) x)
        // cond:
-       // result: (MOVWconst [val])
+       // result: (ADDSshiftRAreg x y z)
        for {
-               val := v.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = val
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRA {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMADDSshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Const32F [val])
+       // match: (ADDSshiftLL (MOVWconst [c]) x [d])
        // cond:
-       // result: (MOVFconst [val])
+       // result: (ADDSconst [c] (SLLconst <x.Type> x [d]))
        for {
-               val := v.AuxInt
-               v.reset(OpARMMOVFconst)
-               v.AuxInt = val
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMADDSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const64F [val])
+       // match: (ADDSshiftLL x (MOVWconst [c]) [d])
        // cond:
-       // result: (MOVDconst [val])
+       // result: (ADDSconst x [int64(uint32(c)<<uint64(d))])
        for {
-               val := v.AuxInt
-               v.reset(OpARMMOVDconst)
-               v.AuxInt = val
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMADDSconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Const8 [val])
+       // match: (ADDSshiftLLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (MOVWconst [val])
+       // result: (ADDSconst [c] (SLL <x.Type> x y))
        for {
-               val := v.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = val
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMADDSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ConstBool [b])
+       // match: (ADDSshiftLLreg x y (MOVWconst [c]))
        // cond:
-       // result: (MOVWconst [b])
+       // result: (ADDSshiftLL x y [c])
        for {
-               b := v.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = b
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMADDSshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
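
The ADDSshiftLLreg rules above handle the case where the shift amount sits in a register: if that register turns out to hold a constant, the value is demoted to the immediate-shift form. Ignoring the flag result that the ADDS forms also produce, both forms denote x + (y << c); a small sketch with made-up values:

    package main

    import "fmt"

    // shiftedAdd is a hypothetical helper standing in for both
    // ADDSshiftLLreg x y (MOVWconst [c]) and ADDSshiftLL x y [c]
    // once the shift amount c is known to be constant.
    func shiftedAdd(x, y uint32, c uint) uint32 {
            return x + y<<c
    }

    func main() {
            fmt.Println(shiftedAdd(100, 3, 2)) // 112 = 100 + (3 << 2)
    }
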
-func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ConstNil)
+       // match: (ADDSshiftRA (MOVWconst [c]) x [d])
        // cond:
-       // result: (MOVWconst [0])
+       // result: (ADDSconst [c] (SRAconst <x.Type> x [d]))
        for {
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMADDSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Convert x mem)
+       // match: (ADDSshiftRA x (MOVWconst [c]) [d])
        // cond:
-       // result: (MOVWconvert x mem)
+       // result: (ADDSconst x [int64(int32(c)>>uint64(d))])
        for {
                x := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVWconvert)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMADDSconst)
                v.AddArg(x)
-               v.AddArg(mem)
+               v.AuxInt = int64(int32(c) >> uint64(d))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftRAreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32Fto32 x)
+       // match: (ADDSshiftRAreg (MOVWconst [c]) x y)
        // cond:
-       // result: (MOVFW x)
+       // result: (ADDSconst [c] (SRA <x.Type> x y))
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVFW)
-               v.AddArg(x)
-               return true
-       }
-}
-func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Fto32U x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMADDSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (ADDSshiftRAreg x y (MOVWconst [c]))
        // cond:
-       // result: (MOVFWU x)
+       // result: (ADDSshiftRA x y [c])
        for {
                x := v.Args[0]
-               v.reset(OpARMMOVFWU)
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMADDSshiftRA)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32Fto64F x)
+       // match: (ADDSshiftRL (MOVWconst [c]) x [d])
        // cond:
-       // result: (MOVFD x)
+       // result: (ADDSconst [c] (SRLconst <x.Type> x [d]))
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVFD)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMADDSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Uto32F x)
+       // match: (ADDSshiftRL x (MOVWconst [c]) [d])
        // cond:
-       // result: (MOVWUF x)
+       // result: (ADDSconst x [int64(uint32(c)>>uint64(d))])
        for {
                x := v.Args[0]
-               v.reset(OpARMMOVWUF)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMADDSconst)
                v.AddArg(x)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32Uto64F x)
+       // match: (ADDSshiftRLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (MOVWUD x)
+       // result: (ADDSconst [c] (SRL <x.Type> x y))
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVWUD)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMADDSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32to32F x)
+       // match: (ADDSshiftRLreg x y (MOVWconst [c]))
        // cond:
-       // result: (MOVWF x)
+       // result: (ADDSshiftRL x y [c])
        for {
                x := v.Args[0]
-               v.reset(OpARMMOVWF)
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMADDSshiftRL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32to64F x)
+       // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
        // cond:
-       // result: (MOVWD x)
+       // result: (MOVWaddr [off1+off2] {sym} ptr)
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVWD)
-               v.AddArg(x)
+               off1 := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym := v_0.Aux
+               ptr := v_0.Args[0]
+               v.reset(OpARMMOVWaddr)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
                return true
        }
-}
-func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto32 x)
+       // match: (ADDconst [0] x)
        // cond:
-       // result: (MOVDW x)
+       // result: x
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
                x := v.Args[0]
-               v.reset(OpARMMOVDW)
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto32F x)
+       // match: (ADDconst [c] (MOVWconst [d]))
        // cond:
-       // result: (MOVDF x)
+       // result: (MOVWconst [int64(int32(c+d))])
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVDF)
-               v.AddArg(x)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int32(c + d))
                return true
        }
-}
-func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto32U x)
+       // match: (ADDconst [c] (ADDconst [d] x))
        // cond:
-       // result: (MOVDWU x)
+       // result: (ADDconst [int64(int32(c+d))] x)
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVDWU)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMADDconst)
+               v.AuxInt = int64(int32(c + d))
                v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpARMDIV(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (DIV (MOVWconst [c]) (MOVWconst [d]))
+       // match: (ADDconst [c] (SUBconst [d] x))
        // cond:
-       // result: (MOVWconst [int64(int32(c)/int32(d))])
+       // result: (ADDconst [int64(int32(c-d))] x)
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSUBconst {
                        break
                }
-               c := v_0.AuxInt
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMADDconst)
+               v.AuxInt = int64(int32(c - d))
+               v.AddArg(x)
+               return true
+       }
+       // match: (ADDconst [c] (RSBconst [d] x))
+       // cond:
+       // result: (RSBconst [int64(int32(c+d))] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMRSBconst {
                        break
                }
-               d := v_1.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int32(c) / int32(d))
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = int64(int32(c + d))
+               v.AddArg(x)
                return true
        }
        return false
 }
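
The ADDconst rules above fold chains of constant adds and subtracts, always reducing the combined constant modulo 2^32 via int64(int32(...)) so the folded form matches what two separate 32-bit operations would compute. The RSBconst case works because reverse-subtract computes constant minus register, so c + (d - x) regroups to (c+d) - x. A small worked example of the wraparound (the values are arbitrary):

    package main

    import "fmt"

    func main() {
            // Folding (ADDconst [c] (ADDconst [d] x)) keeps the combined constant
            // in 32-bit two's complement, so it wraps exactly like two 32-bit adds.
            c, d := int64(0x7fffffff), int64(1)
            fmt.Println(int64(int32(c + d))) // -2147483648
    }
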
-func rewriteValueARM_OpARMDIVU(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (DIVU x (MOVWconst [1]))
+       // match: (ADDshiftLL (MOVWconst [c]) x [d])
        // cond:
-       // result: x
+       // result: (ADDconst [c] (SLLconst <x.Type> x [d]))
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_1.AuxInt != 1 {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMADDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-       // match: (DIVU x (MOVWconst [c]))
-       // cond: isPowerOfTwo(c)
-       // result: (SRLconst [log2(c)] x)
+       // match: (ADDshiftLL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (ADDconst x [int64(uint32(c)<<uint64(d))])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -1749,2485 +2221,2633 @@ func rewriteValueARM_OpARMDIVU(v *Value, config *Config) bool {
                        break
                }
                c := v_1.AuxInt
-               if !(isPowerOfTwo(c)) {
-                       break
-               }
-               v.reset(OpARMSRLconst)
-               v.AuxInt = log2(c)
+               d := v.AuxInt
+               v.reset(OpARMADDconst)
                v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
                return true
        }
-       // match: (DIVU (MOVWconst [c]) (MOVWconst [d]))
+       return false
+}
+func rewriteValueARM_OpARMADDshiftLLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ADDshiftLLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (MOVWconst [int64(uint32(c)/uint32(d))])
+       // result: (ADDconst [c] (SLL <x.Type> x y))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMADDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (ADDshiftLLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (ADDshiftLL x y [c])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               d := v_1.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(uint32(c) / uint32(d))
+               c := v_2.AuxInt
+               v.reset(OpARMADDshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
        return false
 }
-func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDshiftRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (DeferCall [argwid] mem)
+       // match: (ADDshiftRA (MOVWconst [c]) x [d])
        // cond:
-       // result: (CALLdefer [argwid] mem)
+       // result: (ADDconst [c] (SRAconst <x.Type> x [d]))
        for {
-               argwid := v.AuxInt
-               mem := v.Args[0]
-               v.reset(OpARMCALLdefer)
-               v.AuxInt = argwid
-               v.AddArg(mem)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMADDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div16 x y)
+       // match: (ADDshiftRA x (MOVWconst [c]) [d])
        // cond:
-       // result: (DIV (SignExt16to32 x) (SignExt16to32 y))
+       // result: (ADDconst x [int64(int32(c)>>uint64(d))])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIV)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMADDconst)
+               v.AddArg(x)
+               v.AuxInt = int64(int32(c) >> uint64(d))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDshiftRAreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div16u x y)
+       // match: (ADDshiftRAreg (MOVWconst [c]) x y)
        // cond:
-       // result: (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))
+       // result: (ADDconst [c] (SRA <x.Type> x y))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIVU)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMADDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
                v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
                return true
        }
-}
-func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32 x y)
+       // match: (ADDshiftRAreg x y (MOVWconst [c]))
        // cond:
-       // result: (DIV x y)
+       // result: (ADDshiftRA x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMDIV)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMADDshiftRA)
                v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div32F x y)
+       // match: (ADDshiftRL (MOVWconst [c]) x [d])
        // cond:
-       // result: (DIVF x y)
+       // result: (ADDconst [c] (SRLconst <x.Type> x [d]))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIVF)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMADDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32u x y)
+       // match: (ADDshiftRL x (MOVWconst [c]) [d])
        // cond:
-       // result: (DIVU x y)
+       // result: (ADDconst x [int64(uint32(c)>>uint64(d))])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIVU)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMADDconst)
                v.AddArg(x)
-               v.AddArg(y)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div64F x y)
+       // match: (ADDshiftRLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (DIVD x y)
+       // result: (ADDconst [c] (SRL <x.Type> x y))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIVD)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMADDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div8 x y)
+       // match: (ADDshiftRLreg x y (MOVWconst [c]))
        // cond:
-       // result: (DIV (SignExt8to32 x) (SignExt8to32 y))
+       // result: (ADDshiftRL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMDIV)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMADDshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div8u x y)
+       // match: (AND (MOVWconst [c]) x)
        // cond:
-       // result: (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))
+       // result: (ANDconst [c] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIVU)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq16 x y)
+       // match: (AND x (MOVWconst [c]))
        // cond:
-       // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (ANDconst [c] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMANDconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq32 x y)
+       // match: (AND x (SLLconst [c] y))
        // cond:
-       // result: (Equal (CMP x y))
+       // result: (ANDshiftLL x y [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMANDshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq32F x y)
+       // match: (AND (SLLconst [c] y) x)
        // cond:
-       // result: (Equal (CMPF x y))
+       // result: (ANDshiftLL x y [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMANDshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq64F x y)
+       // match: (AND x (SRLconst [c] y))
        // cond:
-       // result: (Equal (CMPD x y))
+       // result: (ANDshiftRL x y [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMANDshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq8 x y)
+       // match: (AND (SRLconst [c] y) x)
        // cond:
-       // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (ANDshiftRL x y [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMANDshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (EqB x y)
+       // match: (AND x (SRAconst [c] y))
        // cond:
-       // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+       // result: (ANDshiftRA x y [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMXORconst)
-               v.AuxInt = 1
-               v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeBool())
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMANDshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (EqPtr x y)
+       // match: (AND (SRAconst [c] y) x)
        // cond:
-       // result: (Equal (CMP x y))
+       // result: (ANDshiftRA x y [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMANDshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Equal (FlagEQ))
+       // match: (AND x (SLL y z))
        // cond:
-       // result: (MOVWconst [1])
+       // result: (ANDshiftLLreg x y z)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMANDshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (Equal (FlagLT_ULT))
+       // match: (AND (SLL y z) x)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (ANDshiftLLreg x y z)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               if v_0.Op != OpARMSLL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMANDshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (Equal (FlagLT_UGT))
+       // match: (AND x (SRL y z))
        // cond:
-       // result: (MOVWconst [0])
+       // result: (ANDshiftRLreg x y z)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMANDshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (Equal (FlagGT_ULT))
+       // match: (AND (SRL y z) x)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (ANDshiftRLreg x y z)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               if v_0.Op != OpARMSRL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMANDshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (Equal (FlagGT_UGT))
+       // match: (AND x (SRA y z))
        // cond:
-       // result: (MOVWconst [0])
+       // result: (ANDshiftRAreg x y z)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMANDshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (Equal (InvertFlags x))
+       // match: (AND (SRA y z) x)
        // cond:
-       // result: (Equal x)
+       // result: (ANDshiftRAreg x y z)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               if v_0.Op != OpARMSRA {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMEqual)
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMANDshiftRAreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq16 x y)
+       // match: (AND x x)
        // cond:
-       // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq16U x y)
+       // match: (AND x (MVN y))
        // cond:
-       // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (BIC x y)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMVN {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpARMBIC)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq32 x y)
+       // match: (AND x (MVNshiftLL y [c]))
        // cond:
-       // result: (GreaterEqual (CMP x y))
+       // result: (BICshiftLL x y [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMVNshiftLL {
+                       break
+               }
+               y := v_1.Args[0]
+               c := v_1.AuxInt
+               v.reset(OpARMBICshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq32F x y)
+       // match: (AND x (MVNshiftRL y [c]))
        // cond:
-       // result: (GreaterEqual (CMPF x y))
+       // result: (BICshiftRL x y [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMVNshiftRL {
+                       break
+               }
+               y := v_1.Args[0]
+               c := v_1.AuxInt
+               v.reset(OpARMBICshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq32U x y)
+       // match: (AND x (MVNshiftRA y [c]))
        // cond:
-       // result: (GreaterEqualU (CMP x y))
+       // result: (BICshiftRA x y [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMVNshiftRA {
+                       break
+               }
+               y := v_1.Args[0]
+               c := v_1.AuxInt
+               v.reset(OpARMBICshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
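
Among the AND rules above, the (AND x (MVN y)) case maps AND-with-complement onto ARM's BIC (bit clear) instruction. In Go terms that is the &^ operator; a quick check with arbitrary values:

    package main

    import "fmt"

    func main() {
            x, y := uint32(0xff00ff00), uint32(0x0ff00ff0)
            andMvn := x & (^y) // AND with the complemented operand, as (AND x (MVN y))
            bic := x &^ y      // single bit-clear operation, as (BIC x y)
            fmt.Printf("%#x %#x %v\n", andMvn, bic, andMvn == bic) // 0xf000f000 0xf000f000 true
    }
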
-func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq64F x y)
+       // match: (ANDconst [0] _)
        // cond:
-       // result: (GreaterEqual (CMPD x y))
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if v.AuxInt != 0 {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq8 x y)
-       // cond:
-       // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+       // match: (ANDconst [c] x)
+       // cond: int32(c)==-1
+       // result: x
        for {
+               c := v.AuxInt
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if !(int32(c) == -1) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq8U x y)
+       // match: (ANDconst [c] (MOVWconst [d]))
        // cond:
-       // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (MOVWconst [c&d])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c & d
                return true
        }
-}
-func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (GetClosurePtr)
+       // match: (ANDconst [c] (ANDconst [d] x))
        // cond:
-       // result: (LoweredGetClosurePtr)
+       // result: (ANDconst [c&d] x)
        for {
-               v.reset(OpARMLoweredGetClosurePtr)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMANDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c & d
+               v.AddArg(x)
                return true
        }
+       return false
 }
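
The ANDconst rules above recognize the zero mask, the all-ones mask, and fold nested constant masks. The all-ones test is written as int32(c) == -1 so it fires whether the AuxInt holds the mask as -1 or as 0xffffffff; a minimal check (the constants are arbitrary):

    package main

    import "fmt"

    func main() {
            for _, c := range []int64{-1, 0xffffffff} {
                    fmt.Println(int32(c) == -1) // true, true: both encodings are the identity mask
            }
    }
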
-func rewriteValueARM_OpGoCall(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (GoCall [argwid] mem)
+       // match: (ANDshiftLL (MOVWconst [c]) x [d])
        // cond:
-       // result: (CALLgo [argwid] mem)
+       // result: (ANDconst [c] (SLLconst <x.Type> x [d]))
        for {
-               argwid := v.AuxInt
-               mem := v.Args[0]
-               v.reset(OpARMCALLgo)
-               v.AuxInt = argwid
-               v.AddArg(mem)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMANDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater16 x y)
+       // match: (ANDshiftLL x (MOVWconst [c]) [d])
        // cond:
-       // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (ANDconst x [int64(uint32(c)<<uint64(d))])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMANDconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
                return true
        }
-}
-func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater16U x y)
-       // cond:
-       // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // match: (ANDshiftLL x y:(SLLconst x [c]) [d])
+       // cond: c==d
+       // result: y
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMGreaterThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if y.Op != OpARMSLLconst {
+                       break
+               }
+               if x != y.Args[0] {
+                       break
+               }
+               c := y.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = y.Type
+               v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater32 x y)
+       // match: (ANDshiftLLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (GreaterThan (CMP x y))
+       // result: (ANDconst [c] (SLL <x.Type> x y))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater32F x y)
+       // match: (ANDshiftLLreg x y (MOVWconst [c]))
        // cond:
-       // result: (GreaterThan (CMPF x y))
+       // result: (ANDshiftLL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMANDshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater32U x y)
+       // match: (ANDshiftRA (MOVWconst [c]) x [d])
        // cond:
-       // result: (GreaterThanU (CMP x y))
+       // result: (ANDconst [c] (SRAconst <x.Type> x [d]))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMANDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
                v0.AddArg(x)
-               v0.AddArg(y)
+               v0.AuxInt = d
                v.AddArg(v0)
                return true
        }
+       // match: (ANDshiftRA x (MOVWconst [c]) [d])
+       // cond:
+       // result: (ANDconst x [int64(int32(c)>>uint64(d))])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMANDconst)
+               v.AddArg(x)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               return true
+       }
+       // match: (ANDshiftRA x y:(SRAconst x [c]) [d])
+       // cond: c==d
+       // result: y
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               if y.Op != OpARMSRAconst {
+                       break
+               }
+               if x != y.Args[0] {
+                       break
+               }
+               c := y.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = y.Type
+               v.AddArg(y)
+               return true
+       }
+       return false
 }
-func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftRAreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater64F x y)
+       // match: (ANDshiftRAreg (MOVWconst [c]) x y)
        // cond:
-       // result: (GreaterThan (CMPD x y))
+       // result: (ANDconst [c] (SRA <x.Type> x y))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater8 x y)
+       // match: (ANDshiftRAreg x y (MOVWconst [c]))
        // cond:
-       // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+       // result: (ANDshiftRA x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMANDshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater8U x y)
+       // match: (ANDshiftRL (MOVWconst [c]) x [d])
        // cond:
-       // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValueARM_OpARMGreaterEqual(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (GreaterEqual (FlagEQ))
-       // cond:
-       // result: (MOVWconst [1])
+       // result: (ANDconst [c] (SRLconst <x.Type> x [d]))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMANDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-       // match: (GreaterEqual (FlagLT_ULT))
+       // match: (ANDshiftRL x (MOVWconst [c]) [d])
        // cond:
-       // result: (MOVWconst [0])
+       // result: (ANDconst x [int64(uint32(c)>>uint64(d))])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMANDconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
                return true
        }
-       // match: (GreaterEqual (FlagLT_UGT))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (ANDshiftRL x y:(SRLconst x [c]) [d])
+       // cond: c==d
+       // result: y
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               x := v.Args[0]
+               y := v.Args[1]
+               if y.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
-               return true
-       }
-       // match: (GreaterEqual (FlagGT_ULT))
-       // cond:
-       // result: (MOVWconst [1])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               if x != y.Args[0] {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := y.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = y.Type
+               v.AddArg(y)
                return true
        }
-       // match: (GreaterEqual (FlagGT_UGT))
+       return false
+}
+func rewriteValueARM_OpARMANDshiftRLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ANDshiftRLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (ANDconst [c] (SRL <x.Type> x y))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (GreaterEqual (InvertFlags x))
+       // match: (ANDshiftRLreg x y (MOVWconst [c]))
        // cond:
-       // result: (LessEqual x)
+       // result: (ANDshiftRL x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMLessEqual)
+               c := v_2.AuxInt
+               v.reset(OpARMANDshiftRL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
        return false
 }
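
The ANDshiftRL rules above do the mask arithmetic at rewrite time; the reg-shift form simply degrades to the constant-shift form once the shift amount is known. A minimal standalone sketch of that arithmetic, with an illustrative helper name that is not part of the CL:

package main

import "fmt"

// foldANDshiftRL mirrors the AuxInt computation of the
// (ANDshiftRL x (MOVWconst [c]) [d]) -> (ANDconst x [...]) rule:
// the 32-bit mask constant is logically shifted right by d up front,
// so the AND no longer needs a shifted operand at all.
func foldANDshiftRL(c, d int64) int64 {
	return int64(uint32(c) >> uint64(d))
}

func main() {
	fmt.Printf("%#x\n", foldANDshiftRL(0xff00, 8)) // 0xff
}
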
-func rewriteValueARM_OpARMGreaterEqualU(v *Value, config *Config) bool {
+func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (GreaterEqualU (FlagEQ))
+       // match: (Add16 x y)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (ADD x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMADD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (GreaterEqualU (FlagLT_ULT))
+}
+func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add32 x y)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (ADD x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMADD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (GreaterEqualU (FlagLT_UGT))
+}
+func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add32F x y)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (ADDF x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMADDF)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (GreaterEqualU (FlagGT_ULT))
+}
+func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add32carry x y)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (ADDS x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMADDS)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (GreaterEqualU (FlagGT_UGT))
+}
+func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add32withcarry x y c)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (ADC x y c)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               c := v.Args[2]
+               v.reset(OpARMADC)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(c)
                return true
        }
-       // match: (GreaterEqualU (InvertFlags x))
+}
+func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add64F x y)
        // cond:
-       // result: (LessEqualU x)
+       // result: (ADDD x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(OpARMLessEqualU)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMADDD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMGreaterThan(v *Value, config *Config) bool {
+func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (GreaterThan (FlagEQ))
+       // match: (Add8 x y)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (ADD x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMADD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (GreaterThan (FlagLT_ULT))
+}
+func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (AddPtr x y)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (ADD x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMADD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (GreaterThan (FlagLT_UGT))
+}
+func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Addr {sym} base)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (MOVWaddr {sym} base)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               sym := v.Aux
+               base := v.Args[0]
+               v.reset(OpARMMOVWaddr)
+               v.Aux = sym
+               v.AddArg(base)
                return true
        }
-       // match: (GreaterThan (FlagGT_ULT))
-       // cond:
-       // result: (MOVWconst [1])
+}
+func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (And16 x y)
+       // cond:
+       // result: (AND x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMAND)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (GreaterThan (FlagGT_UGT))
+}
+func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (And32 x y)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (AND x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMAND)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (GreaterThan (InvertFlags x))
+}
+func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (And8 x y)
        // cond:
-       // result: (LessThan x)
+       // result: (AND x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(OpARMLessThan)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMAND)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMGreaterThanU(v *Value, config *Config) bool {
+func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (GreaterThanU (FlagEQ))
+       // match: (AndB x y)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (AND x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMAND)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (BIC x (MOVWconst [c]))
+       // cond:
+       // result: (BICconst [c] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_1.AuxInt
+               v.reset(OpARMBICconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (GreaterThanU (FlagLT_ULT))
+       // match: (BIC x (SLLconst [c] y))
        // cond:
-       // result: (MOVWconst [0])
+       // result: (BICshiftLL x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMBICshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (GreaterThanU (FlagLT_UGT))
+       // match: (BIC x (SRLconst [c] y))
        // cond:
-       // result: (MOVWconst [1])
+       // result: (BICshiftRL x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMBICshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (GreaterThanU (FlagGT_ULT))
+       // match: (BIC x (SRAconst [c] y))
        // cond:
-       // result: (MOVWconst [0])
+       // result: (BICshiftRA x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMBICshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (GreaterThanU (FlagGT_UGT))
+       // match: (BIC x (SLL y z))
        // cond:
-       // result: (MOVWconst [1])
+       // result: (BICshiftLLreg x y z)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMBICshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (GreaterThanU (InvertFlags x))
+       // match: (BIC x (SRL y z))
        // cond:
-       // result: (LessThanU x)
+       // result: (BICshiftRLreg x y z)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMLessThanU)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMBICshiftRLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul16 x y)
+       // match: (BIC x (SRA y z))
        // cond:
-       // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+       // result: (BICshiftRAreg x y z)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRAconst)
-               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v.AuxInt = 16
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMBICshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul16u x y)
+       // match: (BIC x x)
        // cond:
-       // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+       // result: (MOVWconst [0])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRLconst)
-               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v.AuxInt = 16
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
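
The BIC rules follow the same pattern as the other bitwise ops: a shifted second operand is absorbed into one of the BICshift* forms, and (BIC x x) vanishes. A tiny sketch of the identities involved (values chosen only for illustration):

package main

import "fmt"

func main() {
	// (BIC x x) -> (MOVWconst [0]) is just the bit-clear identity:
	// clearing every bit of x that is set in x leaves nothing.
	x := int32(0x1234)
	fmt.Println(x&^x == 0) // true

	// The shifted-operand rules fold the shift into the BIC itself,
	// e.g. x &^ (y << 3) becomes a single BICshiftLL on ARM.
	y := int32(0x0f)
	fmt.Printf("%#x\n", x&^(y<<3)) // 0x1234 &^ 0x78 == 0x1204
}
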
-func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMBICconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul32 x y)
+       // match: (BICconst [0] x)
        // cond:
-       // result: (HMUL x y)
+       // result: x
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMHMUL)
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul32u x y)
+       // match: (BICconst [c] _)
+       // cond: int32(c)==-1
+       // result: (MOVWconst [0])
+       for {
+               c := v.AuxInt
+               if !(int32(c) == -1) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (BICconst [c] (MOVWconst [d]))
        // cond:
-       // result: (HMULU x y)
+       // result: (MOVWconst [d&^c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMHMULU)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = d &^ c
                return true
        }
+       return false
 }
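
The (BICconst [c] (MOVWconst [d])) case reduces to Go's and-not operator; a minimal sketch of that arithmetic, with an illustrative helper name:

package main

import "fmt"

// bicConst mirrors (BICconst [c] (MOVWconst [d])) -> (MOVWconst [d&^c]):
// every bit set in c is cleared in d.
func bicConst(d, c int64) int64 {
	return d &^ c
}

func main() {
	fmt.Printf("%#x\n", bicConst(0xffff, 0x00f0)) // 0xff0f
	// The other two rules are the degenerate masks: c == 0 clears
	// nothing, int32(c) == -1 clears all 32 bits.
	fmt.Printf("%#x\n", bicConst(0xffff, 0))                 // 0xffff
	fmt.Printf("%#x\n", uint32(int32(bicConst(0xffff, -1)))) // 0x0
}
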
-func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMBICshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul8 x y)
+       // match: (BICshiftLL x (MOVWconst [c]) [d])
        // cond:
-       // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+       // result: (BICconst x [int64(uint32(c)<<uint64(d))])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRAconst)
-               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v.AuxInt = 8
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMBICconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               return true
+       }
+       // match: (BICshiftLL x (SLLconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               if x != v_1.Args[0] {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
-func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMBICshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul8u x y)
+       // match: (BICshiftLLreg x y (MOVWconst [c]))
        // cond:
-       // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+       // result: (BICshiftLL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRLconst)
-               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v.AuxInt = 8
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMBICshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMBICshiftRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (InterCall [argwid] entry mem)
+       // match: (BICshiftRA x (MOVWconst [c]) [d])
        // cond:
-       // result: (CALLinter [argwid] entry mem)
+       // result: (BICconst x [int64(int32(c)>>uint64(d))])
        for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMCALLinter)
-               v.AuxInt = argwid
-               v.AddArg(entry)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMBICconst)
+               v.AddArg(x)
+               v.AuxInt = int64(int32(c) >> uint64(d))
                return true
        }
-}
-func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (IsInBounds idx len)
-       // cond:
-       // result: (LessThanU (CMP idx len))
+       // match: (BICshiftRA x (SRAconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
        for {
-               idx := v.Args[0]
-               len := v.Args[1]
-               v.reset(OpARMLessThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(idx)
-               v0.AddArg(len)
-               v.AddArg(v0)
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               if x != v_1.Args[0] {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
-func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMBICshiftRAreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (IsNonNil ptr)
+       // match: (BICshiftRAreg x y (MOVWconst [c]))
        // cond:
-       // result: (NotEqual (CMPconst [0] ptr))
+       // result: (BICshiftRA x y [c])
        for {
-               ptr := v.Args[0]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v0.AuxInt = 0
-               v0.AddArg(ptr)
-               v.AddArg(v0)
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMBICshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMBICshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (IsSliceInBounds idx len)
+       // match: (BICshiftRL x (MOVWconst [c]) [d])
        // cond:
-       // result: (LessEqualU (CMP idx len))
+       // result: (BICconst x [int64(uint32(c)>>uint64(d))])
        for {
-               idx := v.Args[0]
-               len := v.Args[1]
-               v.reset(OpARMLessEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(idx)
-               v0.AddArg(len)
-               v.AddArg(v0)
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMBICconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
                return true
        }
-}
-func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq16 x y)
-       // cond:
-       // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       // match: (BICshiftRL x (SRLconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               if x != v_1.Args[0] {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
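
BICshiftRA and BICshiftRL differ only in which right shift is folded into the constant: arithmetic (sign-extending, via int32) versus logical (zero-filling, via uint32). A small sketch of the two AuxInt computations, with illustrative helper names:

package main

import "fmt"

// shiftRA and shiftRL mirror the AuxInt arithmetic of the
// BICshiftRA and BICshiftRL constant cases above.
func shiftRA(c, d int64) int64 { return int64(int32(c) >> uint64(d)) }
func shiftRL(c, d int64) int64 { return int64(uint32(c) >> uint64(d)) }

func main() {
	// For a negative 32-bit constant the two disagree: the arithmetic
	// shift keeps the sign bits, the logical shift pulls in zeros.
	c := int64(-8) // 0xfffffff8 as a uint32
	fmt.Println(shiftRA(c, 1))         // -4
	fmt.Printf("%#x\n", shiftRL(c, 1)) // 0x7ffffffc
}
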
-func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMBICshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq16U x y)
+       // match: (BICshiftRLreg x y (MOVWconst [c]))
        // cond:
-       // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (BICshiftRL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMLessEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMBICshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq32 x y)
+       // match: (CMOVWHSconst _ (FlagEQ) [c])
        // cond:
-       // result: (LessEqual (CMP x y))
+       // result: (MOVWconst [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMFlagEQ {
+                       break
+               }
+               c := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq32F x y)
+       // match: (CMOVWHSconst x (FlagLT_ULT))
        // cond:
-       // result: (GreaterEqual (CMPF y x))
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq32U x y)
+       // match: (CMOVWHSconst _ (FlagLT_UGT) [c])
        // cond:
-       // result: (LessEqualU (CMP x y))
+       // result: (MOVWconst [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               c := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq64F x y)
+       // match: (CMOVWHSconst x (FlagGT_ULT))
        // cond:
-       // result: (GreaterEqual (CMPD y x))
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq8 x y)
+       // match: (CMOVWHSconst _ (FlagGT_UGT) [c])
        // cond:
-       // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+       // result: (MOVWconst [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               c := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq8U x y)
+       // match: (CMOVWHSconst x (InvertFlags flags) [c])
        // cond:
-       // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (CMOVWLSconst x flags [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMInvertFlags {
+                       break
+               }
+               flags := v_1.Args[0]
+               c := v.AuxInt
+               v.reset(OpARMCMOVWLSconst)
+               v.AddArg(x)
+               v.AddArg(flags)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less16 x y)
+       // match: (CMOVWLSconst _ (FlagEQ) [c])
        // cond:
-       // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (MOVWconst [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessThan)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMFlagEQ {
+                       break
+               }
+               c := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less16U x y)
+       // match: (CMOVWLSconst _ (FlagLT_ULT) [c])
        // cond:
-       // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (MOVWconst [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               c := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less32 x y)
+       // match: (CMOVWLSconst x (FlagLT_UGT))
        // cond:
-       // result: (LessThan (CMP x y))
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessThan)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less32F x y)
+       // match: (CMOVWLSconst _ (FlagGT_ULT) [c])
        // cond:
-       // result: (GreaterThan (CMPF y x))
+       // result: (MOVWconst [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               c := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less32U x y)
+       // match: (CMOVWLSconst x (FlagGT_UGT))
        // cond:
-       // result: (LessThanU (CMP x y))
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less64F x y)
+       // match: (CMOVWLSconst x (InvertFlags flags) [c])
        // cond:
-       // result: (GreaterThan (CMPD y x))
+       // result: (CMOVWHSconst x flags [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMInvertFlags {
+                       break
+               }
+               flags := v_1.Args[0]
+               c := v.AuxInt
+               v.reset(OpARMCMOVWHSconst)
+               v.AddArg(x)
+               v.AddArg(flags)
+               v.AuxInt = c
                return true
        }
+       return false
 }
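
CMOVWHSconst and CMOVWLSconst are the conditional moves introduced for the broken-up shifts: each keeps its register argument or substitutes the AuxInt constant depending on the unsigned comparison behind the flags, and InvertFlags simply swaps the HS and LS variants. A rough model of that selection, with a plain uint32 comparison standing in for the flags value (names are illustrative, not the compiler's):

package main

import "fmt"

// cmovwHSconst keeps x when the unsigned comparison was "lower than"
// and substitutes c when it was "higher or same" (EQ or unsigned GT),
// matching the flag cases folded above.
func cmovwHSconst(x int32, a, b uint32, c int32) int32 {
	if a >= b { // HS: unsigned higher-or-same
		return c
	}
	return x
}

// cmovwLSconst is the mirror image: it takes c on "lower or same".
func cmovwLSconst(x int32, a, b uint32, c int32) int32 {
	if a <= b { // LS: unsigned lower-or-same
		return c
	}
	return x
}

func main() {
	fmt.Println(cmovwHSconst(7, 3, 5, 42)) // 3 < 5: keep 7
	fmt.Println(cmovwLSconst(7, 3, 5, 42)) // 3 <= 5: take 42
}
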
-func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less8 x y)
+       // match: (CMP x (MOVWconst [c]))
        // cond:
-       // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+       // result: (CMPconst [c] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessThan)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMCMPconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less8U x y)
+       // match: (CMP (MOVWconst [c]) x)
        // cond:
-       // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (InvertFlags (CMPconst [c] x))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v0.AuxInt = c
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpARMLessEqual(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (LessEqual (FlagEQ))
+       // match: (CMP x (SLLconst [c] y))
        // cond:
-       // result: (MOVWconst [1])
+       // result: (CMPshiftLL x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMCMPshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (LessEqual (FlagLT_ULT))
+       // match: (CMP (SLLconst [c] y) x)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (InvertFlags (CMPshiftLL x y [c]))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               if v_0.Op != OpARMSLLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPshiftLL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v0.AuxInt = c
+               v.AddArg(v0)
                return true
        }
-       // match: (LessEqual (FlagLT_UGT))
+       // match: (CMP x (SRLconst [c] y))
        // cond:
-       // result: (MOVWconst [1])
+       // result: (CMPshiftRL x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMCMPshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (LessEqual (FlagGT_ULT))
+       // match: (CMP (SRLconst [c] y) x)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (InvertFlags (CMPshiftRL x y [c]))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               if v_0.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPshiftRL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v0.AuxInt = c
+               v.AddArg(v0)
                return true
        }
-       // match: (LessEqual (FlagGT_UGT))
+       // match: (CMP x (SRAconst [c] y))
        // cond:
-       // result: (MOVWconst [0])
+       // result: (CMPshiftRA x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMCMPshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (LessEqual (InvertFlags x))
+       // match: (CMP (SRAconst [c] y) x)
        // cond:
-       // result: (GreaterEqual x)
+       // result: (InvertFlags (CMPshiftRA x y [c]))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               if v_0.Op != OpARMSRAconst {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMGreaterEqual)
-               v.AddArg(x)
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPshiftRA, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v0.AuxInt = c
+               v.AddArg(v0)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMLessEqualU(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (LessEqualU (FlagEQ))
+       // match: (CMP x (SLL y z))
        // cond:
-       // result: (MOVWconst [1])
+       // result: (CMPshiftLLreg x y z)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMCMPshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (LessEqualU (FlagLT_ULT))
+       // match: (CMP (SLL y z) x)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (InvertFlags (CMPshiftLLreg x y z))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               if v_0.Op != OpARMSLL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPshiftLLreg, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v0.AddArg(z)
+               v.AddArg(v0)
                return true
        }
-       // match: (LessEqualU (FlagLT_UGT))
+       // match: (CMP x (SRL y z))
        // cond:
-       // result: (MOVWconst [0])
+       // result: (CMPshiftRLreg x y z)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMCMPshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (LessEqualU (FlagGT_ULT))
+       // match: (CMP (SRL y z) x)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (InvertFlags (CMPshiftRLreg x y z))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               if v_0.Op != OpARMSRL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPshiftRLreg, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v0.AddArg(z)
+               v.AddArg(v0)
                return true
        }
-       // match: (LessEqualU (FlagGT_UGT))
+       // match: (CMP x (SRA y z))
        // cond:
-       // result: (MOVWconst [0])
+       // result: (CMPshiftRAreg x y z)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMCMPshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (LessEqualU (InvertFlags x))
+       // match: (CMP (SRA y z) x)
        // cond:
-       // result: (GreaterEqualU x)
+       // result: (InvertFlags (CMPshiftRAreg x y z))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               if v_0.Op != OpARMSRA {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMGreaterEqualU)
-               v.AddArg(x)
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPshiftRAreg, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v0.AddArg(z)
+               v.AddArg(v0)
                return true
        }
        return false
 }
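
When the constant or the shifted operand appears as the first CMP argument, the rule swaps the operands into the canonical CMPconst/CMPshift form and wraps the result in InvertFlags so downstream flag consumers still see the original ordering. A minimal model of why that flip is sound (types and names here are illustrative, not the compiler's representation):

package main

import "fmt"

type flagResult int

const (
	flagEQ flagResult = iota
	flagLT
	flagGT
)

// cmp returns the signed ordering of "a compared with b".
func cmp(a, b int32) flagResult {
	switch {
	case a == b:
		return flagEQ
	case a < b:
		return flagLT
	}
	return flagGT
}

// invert swaps LT and GT, leaving EQ alone, like InvertFlags.
func invert(f flagResult) flagResult {
	switch f {
	case flagLT:
		return flagGT
	case flagGT:
		return flagLT
	}
	return flagEQ
}

func main() {
	c, x := int32(10), int32(3)
	// Comparing c against x gives the inverse of comparing x against c.
	fmt.Println(cmp(c, x) == invert(cmp(x, c))) // true
}
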
-func rewriteValueARM_OpARMLessThan(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (LessThan (FlagEQ))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (CMPconst (MOVWconst [x]) [y])
+       // cond: int32(x)==int32(y)
+       // result: (FlagEQ)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
-               return true
-       }
-       // match: (LessThan (FlagLT_ULT))
-       // cond:
-       // result: (MOVWconst [1])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               x := v_0.AuxInt
+               y := v.AuxInt
+               if !(int32(x) == int32(y)) {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               v.reset(OpARMFlagEQ)
                return true
        }
-       // match: (LessThan (FlagLT_UGT))
-       // cond:
-       // result: (MOVWconst [1])
+       // match: (CMPconst (MOVWconst [x]) [y])
+       // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
+       // result: (FlagLT_ULT)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               x := v_0.AuxInt
+               y := v.AuxInt
+               if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
+                       break
+               }
+               v.reset(OpARMFlagLT_ULT)
                return true
        }
-       // match: (LessThan (FlagGT_ULT))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (CMPconst (MOVWconst [x]) [y])
+       // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
+       // result: (FlagLT_UGT)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               x := v_0.AuxInt
+               y := v.AuxInt
+               if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
+                       break
+               }
+               v.reset(OpARMFlagLT_UGT)
                return true
        }
-       // match: (LessThan (FlagGT_UGT))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (CMPconst (MOVWconst [x]) [y])
+       // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
+       // result: (FlagGT_ULT)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               x := v_0.AuxInt
+               y := v.AuxInt
+               if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+                       break
+               }
+               v.reset(OpARMFlagGT_ULT)
                return true
        }
-       // match: (LessThan (InvertFlags x))
-       // cond:
-       // result: (GreaterThan x)
+       // match: (CMPconst (MOVWconst [x]) [y])
+       // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
+       // result: (FlagGT_UGT)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMGreaterThan)
-               v.AddArg(x)
+               x := v_0.AuxInt
+               y := v.AuxInt
+               if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+                       break
+               }
+               v.reset(OpARMFlagGT_UGT)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMLessThanU(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (LessThanU (FlagEQ))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (CMPconst (MOVBUreg _) [c])
+       // cond: 0xff < c
+       // result: (FlagLT_ULT)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               if v_0.Op != OpARMMOVBUreg {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v.AuxInt
+               if !(0xff < c) {
+                       break
+               }
+               v.reset(OpARMFlagLT_ULT)
                return true
        }
-       // match: (LessThanU (FlagLT_ULT))
-       // cond:
-       // result: (MOVWconst [1])
+       // match: (CMPconst (MOVHUreg _) [c])
+       // cond: 0xffff < c
+       // result: (FlagLT_ULT)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               if v_0.Op != OpARMMOVHUreg {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v.AuxInt
+               if !(0xffff < c) {
+                       break
+               }
+               v.reset(OpARMFlagLT_ULT)
                return true
        }
-       // match: (LessThanU (FlagLT_UGT))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (CMPconst (ANDconst _ [m]) [n])
+       // cond: 0 <= int32(m) && int32(m) < int32(n)
+       // result: (FlagLT_ULT)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               if v_0.Op != OpARMANDconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               m := v_0.AuxInt
+               n := v.AuxInt
+               if !(0 <= int32(m) && int32(m) < int32(n)) {
+                       break
+               }
+               v.reset(OpARMFlagLT_ULT)
                return true
        }
-       // match: (LessThanU (FlagGT_ULT))
-       // cond:
-       // result: (MOVWconst [1])
+       // match: (CMPconst (SRLconst _ [c]) [n])
+       // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)
+       // result: (FlagLT_ULT)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               if v_0.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
-               return true
-       }
-       // match: (LessThanU (FlagGT_UGT))
+               c := v_0.AuxInt
+               n := v.AuxInt
+               if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) {
+                       break
+               }
+               v.reset(OpARMFlagLT_ULT)
+               return true
+       }
+       return false
+}
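
The block of CMPconst rules above folds a comparison of two known constants straight into one of the ARM flag constants (FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT, FlagGT_UGT), recording the signed and the unsigned ordering at once. A minimal sketch of that classification outside the compiler (the function name is made up for illustration, not taken from the CL):

package main

import "fmt"

// foldCMPconst mirrors the conditions in the generated CMPconst rules:
// the signed and unsigned orderings of the two 32-bit constants pick the
// flag constant that CMP x, y would have produced at run time.
func foldCMPconst(x, y int64) string {
	switch {
	case int32(x) == int32(y):
		return "FlagEQ"
	case int32(x) < int32(y) && uint32(x) < uint32(y):
		return "FlagLT_ULT"
	case int32(x) < int32(y) && uint32(x) > uint32(y):
		return "FlagLT_UGT"
	case int32(x) > int32(y) && uint32(x) < uint32(y):
		return "FlagGT_ULT"
	default:
		return "FlagGT_UGT"
	}
}

func main() {
	fmt.Println(foldCMPconst(3, 5))  // FlagLT_ULT
	fmt.Println(foldCMPconst(-1, 1)) // FlagLT_UGT: signed less, unsigned greater
}
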
+func rewriteValueARM_OpARMCMPshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (CMPshiftLL (MOVWconst [c]) x [d])
        // cond:
-       // result: (MOVWconst [0])
+       // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v0.AuxInt = c
+               v1 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v1.AddArg(x)
+               v1.AuxInt = d
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (LessThanU (InvertFlags x))
+       // match: (CMPshiftLL x (MOVWconst [c]) [d])
        // cond:
-       // result: (GreaterThanU x)
+       // result: (CMPconst x [int64(uint32(c)<<uint64(d))])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMGreaterThanU)
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMCMPconst)
                v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
                return true
        }
        return false
 }
-func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMPshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Load <t> ptr mem)
-       // cond: t.IsBoolean()
-       // result: (MOVBUload ptr mem)
+       // match: (CMPshiftLLreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(t.IsBoolean()) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVBUload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v0.AuxInt = c
+               v1 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v1.AddArg(x)
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is8BitInt(t) && isSigned(t))
-       // result: (MOVBload ptr mem)
+       // match: (CMPshiftLLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (CMPshiftLL x y [c])
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is8BitInt(t) && isSigned(t)) {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVBload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_2.AuxInt
+               v.reset(OpARMCMPshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is8BitInt(t) && !isSigned(t))
-       // result: (MOVBUload ptr mem)
+       return false
+}
+func rewriteValueARM_OpARMCMPshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (CMPshiftRA (MOVWconst [c]) x [d])
+       // cond:
+       // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is8BitInt(t) && !isSigned(t)) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVBUload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v0.AuxInt = c
+               v1 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v1.AddArg(x)
+               v1.AuxInt = d
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is16BitInt(t) && isSigned(t))
-       // result: (MOVHload ptr mem)
+       // match: (CMPshiftRA x (MOVWconst [c]) [d])
+       // cond:
+       // result: (CMPconst x [int64(int32(c)>>uint64(d))])
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is16BitInt(t) && isSigned(t)) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVHload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMCMPconst)
+               v.AddArg(x)
+               v.AuxInt = int64(int32(c) >> uint64(d))
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is16BitInt(t) && !isSigned(t))
-       // result: (MOVHUload ptr mem)
+       return false
+}
+func rewriteValueARM_OpARMCMPshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (CMPshiftRAreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is16BitInt(t) && !isSigned(t)) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVHUload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v0.AuxInt = c
+               v1 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v1.AddArg(x)
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is32BitInt(t) || isPtr(t))
-       // result: (MOVWload ptr mem)
+       // match: (CMPshiftRAreg x y (MOVWconst [c]))
+       // cond:
+       // result: (CMPshiftRA x y [c])
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitInt(t) || isPtr(t)) {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_2.AuxInt
+               v.reset(OpARMCMPshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is32BitFloat(t)
-       // result: (MOVFload ptr mem)
+       return false
+}
+func rewriteValueARM_OpARMCMPshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (CMPshiftRL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitFloat(t)) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVFload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v0.AuxInt = c
+               v1 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v1.AddArg(x)
+               v1.AuxInt = d
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is64BitFloat(t)
-       // result: (MOVDload ptr mem)
+       // match: (CMPshiftRL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (CMPconst x [int64(uint32(c)>>uint64(d))])
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is64BitFloat(t)) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVDload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMCMPconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMLoweredZeromask(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMPshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (LoweredZeromask (MOVWconst [0]))
+       // match: (CMPshiftRLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
-               if v_0.AuxInt != 0 {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMInvertFlags)
+               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v0.AuxInt = c
+               v1 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v1.AddArg(x)
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (LoweredZeromask (MOVWconst [c]))
-       // cond: c != 0
-       // result: (MOVWconst [0xffffffff])
+       // match: (CMPshiftRLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (CMPshiftRL x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               if !(c != 0) {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0xffffffff
+               c := v_2.AuxInt
+               v.reset(OpARMCMPshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
        return false
 }
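
The CMPshiftLL/RA/RL (and *reg) helpers above handle comparisons whose second operand is a shifted value. When the shifted operand is a constant shifted by a constant amount, the shift is folded into the immediate of a plain CMPconst; when the constant sits on the other side, the comparison is rebuilt the other way round under InvertFlags. A small sketch of the immediate computed by the CMPshiftLL-with-constant rule (names are illustrative, not the compiler's):

package main

import "fmt"

// cmpShiftLLImm is the immediate produced when CMPshiftLL x (MOVWconst [c]) [d]
// is rewritten to CMPconst x [int64(uint32(c)<<uint64(d))]: the constant is
// shifted at compile time instead of at run time.
func cmpShiftLLImm(c, d int64) int64 {
	return int64(uint32(c) << uint64(d))
}

func main() {
	fmt.Println(cmpShiftLLImm(3, 4)) // 48, i.e. CMP x, #48
}
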
-func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
+func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lrot16 <t> x [c])
+       // match: (ClosureCall [argwid] entry closure mem)
        // cond:
-       // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
+       // result: (CALLclosure [argwid] entry closure mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               c := v.AuxInt
-               v.reset(OpARMOR)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
-               v0.AddArg(x)
-               v0.AuxInt = c & 15
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
-               v1.AddArg(x)
-               v1.AuxInt = 16 - c&15
-               v.AddArg(v1)
+               argwid := v.AuxInt
+               entry := v.Args[0]
+               closure := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMCALLclosure)
+               v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(closure)
+               v.AddArg(mem)
                return true
        }
 }
-func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
+func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lrot32 x [c])
+       // match: (Com16 x)
        // cond:
-       // result: (SRRconst x [32-c&31])
+       // result: (MVN x)
        for {
                x := v.Args[0]
-               c := v.AuxInt
-               v.reset(OpARMSRRconst)
+               v.reset(OpARMMVN)
                v.AddArg(x)
-               v.AuxInt = 32 - c&31
                return true
        }
 }
-func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
+func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lrot8 <t> x [c])
+       // match: (Com32 x)
        // cond:
-       // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
+       // result: (MVN x)
        for {
-               t := v.Type
                x := v.Args[0]
-               c := v.AuxInt
-               v.reset(OpARMOR)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
-               v0.AddArg(x)
-               v0.AuxInt = c & 7
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
-               v1.AddArg(x)
-               v1.AuxInt = 8 - c&7
-               v.AddArg(v1)
+               v.reset(OpARMMVN)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x16 x y)
+       // match: (Com8 x)
        // cond:
-       // result: (SLL x (ZeroExt16to32 y))
+       // result: (MVN x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSLL)
+               v.reset(OpARMMVN)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(y)
-               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x32 x y)
+       // match: (Const16 [val])
        // cond:
-       // result: (SLL x y)
+       // result: (MOVWconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSLL)
-               v.AddArg(x)
-               v.AddArg(y)
+               val := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
+func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x64 x (Const64 [c]))
-       // cond: uint64(c) < 16
-       // result: (SLLconst x [c])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 16) {
-                       break
-               }
-               v.reset(OpARMSLLconst)
-               v.AddArg(x)
-               v.AuxInt = c
-               return true
-       }
-       // match: (Lsh16x64 _ (Const64 [c]))
-       // cond: uint64(c) >= 16
-       // result: (Const16 [0])
+       // match: (Const32 [val])
+       // cond:
+       // result: (MOVWconst [val])
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
-                       break
-               }
-               v.reset(OpConst16)
-               v.AuxInt = 0
+               val := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = val
                return true
        }
-       return false
 }
-func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x8  x y)
+       // match: (Const32F [val])
        // cond:
-       // result: (SLL x (ZeroExt8to32 y))
+       // result: (MOVFconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSLL)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(y)
-               v.AddArg(v0)
+               val := v.AuxInt
+               v.reset(OpARMMOVFconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh32x16 x y)
+       // match: (Const64F [val])
        // cond:
-       // result: (SLL x (ZeroExt16to32 y))
+       // result: (MOVDconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSLL)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(y)
-               v.AddArg(v0)
+               val := v.AuxInt
+               v.reset(OpARMMOVDconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh32x32 x y)
+       // match: (Const8 [val])
        // cond:
-       // result: (SLL x y)
+       // result: (MOVWconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSLL)
-               v.AddArg(x)
-               v.AddArg(y)
+               val := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
+func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh32x64 x (Const64 [c]))
-       // cond: uint64(c) < 32
-       // result: (SLLconst x [c])
+       // match: (ConstBool [b])
+       // cond:
+       // result: (MOVWconst [b])
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 32) {
-                       break
-               }
-               v.reset(OpARMSLLconst)
-               v.AddArg(x)
-               v.AuxInt = c
+               b := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = b
                return true
        }
-       // match: (Lsh32x64 _ (Const64 [c]))
-       // cond: uint64(c) >= 32
-       // result: (Const32 [0])
+}
+func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ConstNil)
+       // cond:
+       // result: (MOVWconst [0])
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
-                       break
-               }
-               v.reset(OpConst32)
+               v.reset(OpARMMOVWconst)
                v.AuxInt = 0
                return true
        }
-       return false
 }
-func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh32x8  x y)
+       // match: (Convert x mem)
        // cond:
-       // result: (SLL x (ZeroExt8to32 y))
+       // result: (MOVWconvert x mem)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSLL)
+               mem := v.Args[1]
+               v.reset(OpARMMOVWconvert)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
 }
-func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x16 x y)
+       // match: (Cvt32Fto32 x)
        // cond:
-       // result: (SLL x (ZeroExt16to32 y))
+       // result: (MOVFW x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSLL)
+               v.reset(OpARMMOVFW)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(y)
-               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x32 x y)
+       // match: (Cvt32Fto32U x)
        // cond:
-       // result: (SLL x y)
+       // result: (MOVFWU x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSLL)
+               v.reset(OpARMMOVFWU)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x64 x (Const64 [c]))
-       // cond: uint64(c) < 8
-       // result: (SLLconst x [c])
+       // match: (Cvt32Fto64F x)
+       // cond:
+       // result: (MOVFD x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 8) {
-                       break
-               }
-               v.reset(OpARMSLLconst)
+               v.reset(OpARMMOVFD)
                v.AddArg(x)
-               v.AuxInt = c
-               return true
-       }
-       // match: (Lsh8x64 _ (Const64 [c]))
-       // cond: uint64(c) >= 8
-       // result: (Const8 [0])
-       for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
-                       break
-               }
-               v.reset(OpConst8)
-               v.AuxInt = 0
                return true
        }
-       return false
 }
-func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x8  x y)
+       // match: (Cvt32Uto32F x)
        // cond:
-       // result: (SLL x (ZeroExt8to32 y))
+       // result: (MOVWUF x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSLL)
+               v.reset(OpARMMOVWUF)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(y)
-               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (Cvt32Uto64F x)
        // cond:
-       // result: (MOVBUload [off1+off2] {sym} ptr mem)
+       // result: (MOVWUD x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVBUload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpARMMOVWUD)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+}
+func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32to32F x)
+       // cond:
+       // result: (MOVWF x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARMMOVBUload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpARMMOVWF)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
-       // result: x
+}
+func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32to64F x)
+       // cond:
+       // result: (MOVWD x)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVBstore {
-                       break
-               }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               x := v.Args[0]
+               v.reset(OpARMMOVWD)
                v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMMOVBUreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBUreg x:(MOVBUload _ _))
+       // match: (Cvt64Fto32 x)
        // cond:
-       // result: (MOVWreg x)
+       // result: (MOVDW x)
        for {
                x := v.Args[0]
-               if x.Op != OpARMMOVBUload {
-                       break
-               }
-               v.reset(OpARMMOVWreg)
+               v.reset(OpARMMOVDW)
                v.AddArg(x)
                return true
        }
-       // match: (MOVBUreg (ANDconst [c] x))
+}
+func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt64Fto32F x)
        // cond:
-       // result: (ANDconst [c&0xff] x)
+       // result: (MOVDF x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMANDconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMANDconst)
-               v.AuxInt = c & 0xff
+               x := v.Args[0]
+               v.reset(OpARMMOVDF)
                v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (Cvt64Fto32U x)
        // cond:
-       // result: (MOVBload [off1+off2] {sym} ptr mem)
+       // result: (MOVDWU x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVBload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpARMMOVDWU)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+}
+func rewriteValueARM_OpARMDIV(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (DIV (MOVWconst [c]) (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [int64(int32(c)/int32(d))])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVBload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               d := v_1.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int32(c) / int32(d))
                return true
        }
-       // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
+       return false
+}
+func rewriteValueARM_OpARMDIVU(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (DIVU x (MOVWconst [1]))
+       // cond:
        // result: x
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVBstore {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
+               if v_1.AuxInt != 1 {
                        break
                }
                v.reset(OpCopy)
@@ -4235,1019 +4855,8777 @@ func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
                v.AddArg(x)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVBreg(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBreg x:(MOVBload _ _))
-       // cond:
-       // result: (MOVWreg x)
+       // match: (DIVU x (MOVWconst [c]))
+       // cond: isPowerOfTwo(c)
+       // result: (SRLconst [log2(c)] x)
        for {
                x := v.Args[0]
-               if x.Op != OpARMMOVBload {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWreg)
+               c := v_1.AuxInt
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARMSRLconst)
+               v.AuxInt = log2(c)
                v.AddArg(x)
                return true
        }
-       // match: (MOVBreg (ANDconst [c] x))
-       // cond: c & 0x80 == 0
-       // result: (ANDconst [c&0x7f] x)
+       // match: (DIVU (MOVWconst [c]) (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [int64(uint32(c)/uint32(d))])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMANDconst {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               x := v_0.Args[0]
-               if !(c&0x80 == 0) {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMANDconst)
-               v.AuxInt = c & 0x7f
-               v.AddArg(x)
+               d := v_1.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(uint32(c) / uint32(d))
                return true
        }
        return false
 }
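
The DIVU rules above remove unsigned divisions with a known divisor: dividing by 1 is a no-op, dividing by a power of two becomes a logical right shift by log2(c), and dividing two constants is evaluated at compile time. A sketch of the power-of-two case, with math/bits standing in for the compiler's internal isPowerOfTwo/log2 helpers:

package main

import (
	"fmt"
	"math/bits"
)

// divuPow2 assumes c is a power of two, mirroring the isPowerOfTwo(c)
// condition; the division then becomes SRLconst [log2(c)] x.
func divuPow2(x, c uint32) uint32 {
	return x >> uint(bits.TrailingZeros32(c))
}

func main() {
	fmt.Println(divuPow2(100, 8)) // 12, same as 100/8
}
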
-func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (DeferCall [argwid] mem)
        // cond:
-       // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+       // result: (CALLdefer [argwid] mem)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
+               argwid := v.AuxInt
+               mem := v.Args[0]
+               v.reset(OpARMCALLdefer)
+               v.AuxInt = argwid
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+}
+func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div16 x y)
        // cond:
-       // result: (MOVBstore [off] {sym} ptr x mem)
+       // result: (DIV (SignExt16to32 x) (SignExt16to32 y))
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVBreg {
-                       break
-               }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(x)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMDIV)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+}
+func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div16u x y)
        // cond:
-       // result: (MOVBstore [off] {sym} ptr x mem)
+       // result: (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVBUreg {
-                       break
-               }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(x)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMDIVU)
+               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+}
+func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div32 x y)
        // cond:
-       // result: (MOVBstore [off] {sym} ptr x mem)
+       // result: (DIV x y)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVHreg {
-                       break
-               }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMDIV)
                v.AddArg(x)
-               v.AddArg(mem)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+}
+func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div32F x y)
        // cond:
-       // result: (MOVBstore [off] {sym} ptr x mem)
+       // result: (DIVF x y)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVHUreg {
-                       break
-               }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMDIVF)
                v.AddArg(x)
-               v.AddArg(mem)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMMOVDload(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (Div32u x y)
        // cond:
-       // result: (MOVDload [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVDload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // result: (DIVU x y)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARMMOVDload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMDIVU)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-       // result: x
+}
+func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div64F x y)
+       // cond:
+       // result: (DIVD x y)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVDstore {
-                       break
-               }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMDIVD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMMOVDstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (Div8 x y)
        // cond:
-       // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+       // result: (DIV (SignExt8to32 x) (SignExt8to32 y))
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMDIV)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div8u x y)
+       // cond:
+       // result: (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMDIVU)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
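
Div16, Div16u, Div8 and Div8u have no native ARM form; as the rules above show, the operands are first sign- or zero-extended to 32 bits and the division is done with the 32-bit DIV/DIVU. The same widening written in plain Go (a sketch, not compiler code):

package main

import "fmt"

// div8 mirrors (Div8 x y) -> (DIV (SignExt8to32 x) (SignExt8to32 y)).
func div8(x, y int8) int32 { return int32(x) / int32(y) }

// div8u mirrors (Div8u x y) -> (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)).
func div8u(x, y uint8) uint32 { return uint32(x) / uint32(y) }

func main() {
	fmt.Println(div8(-100, 3)) // -33
	fmt.Println(div8u(200, 3)) // 66
}
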
+func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq16 x y)
+       // cond:
+       // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq32 x y)
+       // cond:
+       // result: (Equal (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq32F x y)
+       // cond:
+       // result: (Equal (CMPF x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq64F x y)
+       // cond:
+       // result: (Equal (CMPD x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq8 x y)
+       // cond:
+       // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (EqB x y)
+       // cond:
+       // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMXORconst)
+               v.AuxInt = 1
+               v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeBool())
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
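
EqB relies on booleans being materialized as 0 or 1 in a register, so the rule above reduces it to pure bit arithmetic: XOR the operands, then flip the low bit with XORconst [1]. A one-line sketch of the identity (illustrative only):

package main

import "fmt"

// eqb assumes x and y are 0 or 1, as lowered booleans are;
// then 1 ^ (x ^ y) is 1 exactly when x == y.
func eqb(x, y uint32) uint32 { return 1 ^ (x ^ y) }

func main() {
	fmt.Println(eqb(1, 1), eqb(1, 0)) // 1 0
}
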
+func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (EqPtr x y)
+       // cond:
+       // result: (Equal (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Equal (FlagEQ))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (Equal (FlagLT_ULT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Equal (FlagLT_UGT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Equal (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Equal (FlagGT_UGT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Equal (InvertFlags x))
+       // cond:
+       // result: (Equal x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMEqual)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
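
Equal folds to a constant as soon as its flags argument is a known flag constant; only FlagEQ yields 1. Under InvertFlags (the operands of the CMP swapped) Equal stays Equal, while ordered predicates flip direction, as in the removed LessThan(InvertFlags x) -> GreaterThan x rule earlier in this file. A worked check of why, written out in Go (illustrative only):

package main

import "fmt"

func main() {
	a, b := 3, 5
	// Swapping the operands of the comparison (what InvertFlags models)
	// leaves equality unchanged but mirrors the ordered predicates.
	fmt.Println((a == b) == (b == a)) // true: Equal(InvertFlags x) = Equal x
	fmt.Println((a < b) == (b > a))   // true: LessThan(InvertFlags x) = GreaterThan x
}
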
+func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq16 x y)
+       // cond:
+       // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq16U x y)
+       // cond:
+       // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq32 x y)
+       // cond:
+       // result: (GreaterEqual (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq32F x y)
+       // cond:
+       // result: (GreaterEqual (CMPF x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq32U x y)
+       // cond:
+       // result: (GreaterEqualU (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq64F x y)
+       // cond:
+       // result: (GreaterEqual (CMPD x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq8 x y)
+       // cond:
+       // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq8U x y)
+       // cond:
+       // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
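+// ARM has no sub-word compare, so the 8- and 16-bit rules widen both operands
+// first (SignExtNto32 for signed, ZeroExtNto32 for unsigned) and issue a single
+// 32-bit CMP; signedness is then carried by the flags consumer
+// (GreaterEqual vs. GreaterEqualU).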
+func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GetClosurePtr)
+       // cond:
+       // result: (LoweredGetClosurePtr)
+       for {
+               v.reset(OpARMLoweredGetClosurePtr)
+               return true
+       }
+}
+func rewriteValueARM_OpGoCall(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GoCall [argwid] mem)
+       // cond:
+       // result: (CALLgo [argwid] mem)
+       for {
+               argwid := v.AuxInt
+               mem := v.Args[0]
+               v.reset(OpARMCALLgo)
+               v.AuxInt = argwid
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater16 x y)
+       // cond:
+       // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater16U x y)
+       // cond:
+       // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater32 x y)
+       // cond:
+       // result: (GreaterThan (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater32F x y)
+       // cond:
+       // result: (GreaterThan (CMPF x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater32U x y)
+       // cond:
+       // result: (GreaterThanU (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater64F x y)
+       // cond:
+       // result: (GreaterThan (CMPD x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater8 x y)
+       // cond:
+       // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater8U x y)
+       // cond:
+       // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpARMGreaterEqual(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GreaterEqual (FlagEQ))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterEqual (FlagLT_ULT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (GreaterEqual (FlagLT_UGT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (GreaterEqual (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterEqual (FlagGT_UGT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterEqual (InvertFlags x))
+       // cond:
+       // result: (LessEqual x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMLessEqual)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMGreaterEqualU(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GreaterEqualU (FlagEQ))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterEqualU (FlagLT_ULT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (GreaterEqualU (FlagLT_UGT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterEqualU (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (GreaterEqualU (FlagGT_UGT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterEqualU (InvertFlags x))
+       // cond:
+       // result: (LessEqualU x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMLessEqualU)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMGreaterThan(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GreaterThan (FlagEQ))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (GreaterThan (FlagLT_ULT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (GreaterThan (FlagLT_UGT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (GreaterThan (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterThan (FlagGT_UGT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterThan (InvertFlags x))
+       // cond:
+       // result: (LessThan x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMLessThan)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMGreaterThanU(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GreaterThanU (FlagEQ))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (GreaterThanU (FlagLT_ULT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (GreaterThanU (FlagLT_UGT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterThanU (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (GreaterThanU (FlagGT_UGT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterThanU (InvertFlags x))
+       // cond:
+       // result: (LessThanU x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMLessThanU)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
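+// InvertFlags denotes the flags of the same compare with its operands swapped,
+// so (GreaterThan (InvertFlags x)) and friends rewrite to the mirrored
+// comparison (LessThan x, and so on) rather than recomputing the flags.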
+func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul16 x y)
+       // cond:
+       // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRAconst)
+               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v.AuxInt = 16
+               return true
+       }
+}
+func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul16u x y)
+       // cond:
+       // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRLconst)
+               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v.AuxInt = 16
+               return true
+       }
+}
+func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul32 x y)
+       // cond:
+       // result: (HMUL x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMHMUL)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul32u x y)
+       // cond:
+       // result: (HMULU x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMHMULU)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul8 x y)
+       // cond:
+       // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRAconst)
+               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v.AuxInt = 8
+               return true
+       }
+}
+func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul8u x y)
+       // cond:
+       // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRLconst)
+               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v.AuxInt = 8
+               return true
+       }
+}
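+// There is no 8- or 16-bit high-multiply, so Hmul8/Hmul16 widen to 32 bits,
+// do a full MUL, and take the high half with a right shift by the operand
+// width (arithmetic when signed, logical when unsigned); roughly, for int16
+// a and b this computes int16((int32(a) * int32(b)) >> 16). Only the 32-bit
+// forms map directly to HMUL/HMULU.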
+func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (InterCall [argwid] entry mem)
+       // cond:
+       // result: (CALLinter [argwid] entry mem)
+       for {
+               argwid := v.AuxInt
+               entry := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMCALLinter)
+               v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (IsInBounds idx len)
+       // cond:
+       // result: (LessThanU (CMP idx len))
+       for {
+               idx := v.Args[0]
+               len := v.Args[1]
+               v.reset(OpARMLessThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(idx)
+               v0.AddArg(len)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (IsNonNil ptr)
+       // cond:
+       // result: (NotEqual (CMPconst [0] ptr))
+       for {
+               ptr := v.Args[0]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v0.AuxInt = 0
+               v0.AddArg(ptr)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (IsSliceInBounds idx len)
+       // cond:
+       // result: (LessEqualU (CMP idx len))
+       for {
+               idx := v.Args[0]
+               len := v.Args[1]
+               v.reset(OpARMLessEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(idx)
+               v0.AddArg(len)
+               v.AddArg(v0)
+               return true
+       }
+}
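+// Bounds checks lower to unsigned comparisons: (IsInBounds idx len) becomes
+// LessThanU of a single CMP and (IsSliceInBounds idx len) becomes LessEqualU,
+// so a negative index is rejected as a huge unsigned value with no extra test.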
+func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq16 x y)
+       // cond:
+       // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq16U x y)
+       // cond:
+       // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq32 x y)
+       // cond:
+       // result: (LessEqual (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq32F x y)
+       // cond:
+       // result: (GreaterEqual (CMPF y x))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq32U x y)
+       // cond:
+       // result: (LessEqualU (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq64F x y)
+       // cond:
+       // result: (GreaterEqual (CMPD y x))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+}
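+// Note the operand swap in the floating-point rules: Less/Leq on floats become
+// GreaterThan/GreaterEqual of (CMPF/CMPD y x) instead of LessThan/LessEqual of
+// (CMPF x y), presumably because the GT/GE conditions are false on an
+// unordered (NaN) compare, which is what Go's semantics require.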
+func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq8 x y)
+       // cond:
+       // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq8U x y)
+       // cond:
+       // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less16 x y)
+       // cond:
+       // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessThan)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less16U x y)
+       // cond:
+       // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less32 x y)
+       // cond:
+       // result: (LessThan (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessThan)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less32F x y)
+       // cond:
+       // result: (GreaterThan (CMPF y x))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less32U x y)
+       // cond:
+       // result: (LessThanU (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less64F x y)
+       // cond:
+       // result: (GreaterThan (CMPD y x))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less8 x y)
+       // cond:
+       // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessThan)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less8U x y)
+       // cond:
+       // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpARMLessEqual(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (LessEqual (FlagEQ))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (LessEqual (FlagLT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (LessEqual (FlagLT_UGT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (LessEqual (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (LessEqual (FlagGT_UGT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (LessEqual (InvertFlags x))
+       // cond:
+       // result: (GreaterEqual x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMGreaterEqual)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMLessEqualU(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (LessEqualU (FlagEQ))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (LessEqualU (FlagLT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (LessEqualU (FlagLT_UGT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (LessEqualU (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (LessEqualU (FlagGT_UGT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (LessEqualU (InvertFlags x))
+       // cond:
+       // result: (GreaterEqualU x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMGreaterEqualU)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMLessThan(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (LessThan (FlagEQ))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (LessThan (FlagLT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (LessThan (FlagLT_UGT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (LessThan (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (LessThan (FlagGT_UGT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (LessThan (InvertFlags x))
+       // cond:
+       // result: (GreaterThan x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMGreaterThan)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMLessThanU(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (LessThanU (FlagEQ))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (LessThanU (FlagLT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (LessThanU (FlagLT_UGT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (LessThanU (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (LessThanU (FlagGT_UGT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (LessThanU (InvertFlags x))
+       // cond:
+       // result: (GreaterThanU x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMGreaterThanU)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Load <t> ptr mem)
+       // cond: t.IsBoolean()
+       // result: (MOVBUload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(t.IsBoolean()) {
+                       break
+               }
+               v.reset(OpARMMOVBUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is8BitInt(t) && isSigned(t))
+       // result: (MOVBload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is8BitInt(t) && isSigned(t)) {
+                       break
+               }
+               v.reset(OpARMMOVBload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is8BitInt(t) && !isSigned(t))
+       // result: (MOVBUload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is8BitInt(t) && !isSigned(t)) {
+                       break
+               }
+               v.reset(OpARMMOVBUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is16BitInt(t) && isSigned(t))
+       // result: (MOVHload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is16BitInt(t) && isSigned(t)) {
+                       break
+               }
+               v.reset(OpARMMOVHload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is16BitInt(t) && !isSigned(t))
+       // result: (MOVHUload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is16BitInt(t) && !isSigned(t)) {
+                       break
+               }
+               v.reset(OpARMMOVHUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is32BitInt(t) || isPtr(t))
+       // result: (MOVWload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitInt(t) || isPtr(t)) {
+                       break
+               }
+               v.reset(OpARMMOVWload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: is32BitFloat(t)
+       // result: (MOVFload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitFloat(t)) {
+                       break
+               }
+               v.reset(OpARMMOVFload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: is64BitFloat(t)
+       // result: (MOVDload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is64BitFloat(t)) {
+                       break
+               }
+               v.reset(OpARMMOVDload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
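+// Load dispatches purely on the type: booleans and unsigned 8/16-bit integers
+// use the zero-extending MOVBUload/MOVHUload, signed ones use MOVBload/MOVHload,
+// 32-bit integers and pointers use MOVWload, and floats use MOVFload/MOVDload.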
+func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot16 <t> x [c])
+       // cond:
+       // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               c := v.AuxInt
+               v.reset(OpARMOR)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+               v0.AddArg(x)
+               v0.AuxInt = c & 15
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+               v1.AddArg(x)
+               v1.AuxInt = 16 - c&15
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot32 x [c])
+       // cond:
+       // result: (SRRconst x [32-c&31])
+       for {
+               x := v.Args[0]
+               c := v.AuxInt
+               v.reset(OpARMSRRconst)
+               v.AddArg(x)
+               v.AuxInt = 32 - c&31
+               return true
+       }
+}
+func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot8 <t> x [c])
+       // cond:
+       // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               c := v.AuxInt
+               v.reset(OpARMOR)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+               v0.AddArg(x)
+               v0.AuxInt = c & 7
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+               v1.AddArg(x)
+               v1.AuxInt = 8 - c&7
+               v.AddArg(v1)
+               return true
+       }
+}
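+// Only 32-bit rotates have a native form: Lrot32 becomes an SRRconst (rotate
+// right) by 32-c, while Lrot16 and Lrot8 are synthesized as the OR of a left
+// shift and a right shift with the count masked to the operand width.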
+func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x16 x y)
+       // cond:
+       // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v.AddArg(v2)
+               v.AuxInt = 0
+               return true
+       }
+}
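+// Variable-count shifts are now built from separate ops: an SLL for the shift
+// itself plus a CMOVWHSconst ("move constant if unsigned higher-or-same")
+// keyed off (CMPconst [256] count), which forces the result to 0 once the
+// count reaches 256. Counts of 32-255 already yield 0 in hardware, since
+// register-specified shifts only look at the low byte of the count.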
+func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x32 x y)
+       // cond:
+       // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v1.AddArg(y)
+               v.AddArg(v1)
+               v.AuxInt = 0
+               return true
+       }
+}
+func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x64 x (Const64 [c]))
+       // cond: uint64(c) < 16
+       // result: (SLLconst x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 16) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AddArg(x)
+               v.AuxInt = c
+               return true
+       }
+       // match: (Lsh16x64 _ (Const64 [c]))
+       // cond: uint64(c) >= 16
+       // result: (Const16 [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 16) {
+                       break
+               }
+               v.reset(OpConst16)
+               v.AuxInt = 0
+               return true
+       }
+       return false
+}
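+// Shifts by a 64-bit constant fold at compile time: counts below the operand
+// width become SLLconst, and counts at or above it become a zero constant of
+// the result type.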
+func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x8  x y)
+       // cond:
+       // result: (SLL x (ZeroExt8to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSLL)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x16 x y)
+       // cond:
+       // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v.AddArg(v2)
+               v.AuxInt = 0
+               return true
+       }
+}
+func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x32 x y)
+       // cond:
+       // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v1.AddArg(y)
+               v.AddArg(v1)
+               v.AuxInt = 0
+               return true
+       }
+}
+func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x64 x (Const64 [c]))
+       // cond: uint64(c) < 32
+       // result: (SLLconst x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 32) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AddArg(x)
+               v.AuxInt = c
+               return true
+       }
+       // match: (Lsh32x64 _ (Const64 [c]))
+       // cond: uint64(c) >= 32
+       // result: (Const32 [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 32) {
+                       break
+               }
+               v.reset(OpConst32)
+               v.AuxInt = 0
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x8  x y)
+       // cond:
+       // result: (SLL x (ZeroExt8to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSLL)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x16 x y)
+       // cond:
+       // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v.AddArg(v2)
+               v.AuxInt = 0
+               return true
+       }
+}
+func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x32 x y)
+       // cond:
+       // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v1.AddArg(y)
+               v.AddArg(v1)
+               v.AuxInt = 0
+               return true
+       }
+}
+func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x64 x (Const64 [c]))
+       // cond: uint64(c) < 8
+       // result: (SLLconst x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 8) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AddArg(x)
+               v.AuxInt = c
+               return true
+       }
+       // match: (Lsh8x64 _ (Const64 [c]))
+       // cond: uint64(c) >= 8
+       // result: (Const8 [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 8) {
+                       break
+               }
+               v.reset(OpConst8)
+               v.AuxInt = 0
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x8  x y)
+       // cond:
+       // result: (SLL x (ZeroExt8to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSLL)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
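The x8 variants (Lsh32x8, Lsh8x8 and the rest below) get away without the conditional move: an 8-bit count can never reach 256, and ARM's register shift already yields 0 for counts 32 through 255, which is exactly what Go requires for those counts. A minimal sketch of the equivalence (lsh32x8 is an illustrative name, not compiler code):

func lsh32x8(x uint32, y uint8) uint32 {
	// Counts 32..255 yield 0 on ARM and in Go; counts of 256 or more
	// cannot occur with a uint8 count, so no guard is needed.
	return x << y
}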
+func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // cond:
+       // result: (MOVBUload [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVBUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVBUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
+       // result: x
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVBstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
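Besides folding ADDconst and MOVWaddr offsets into the load, the last MOVBUload rule performs store-to-load forwarding: an unsigned byte load from the address that was just stored to (same symbol, offset and pointer, with an unsigned stored value) is replaced by the stored value itself. A hypothetical source pattern of the kind this targets, shown only as a sketch:

var buf [1]byte

func roundTrip(v byte) byte {
	buf[0] = v    // MOVBstore
	return buf[0] // MOVBUload of the same {sym, off, ptr}: rewritten to v
}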
+func rewriteValueARM_OpARMMOVBUreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVBUreg x:(MOVBUload _ _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVBUload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVBUreg (ANDconst [c] x))
+       // cond:
+       // result: (ANDconst [c&0xff] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMANDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c & 0xff
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVBUreg x:(MOVBUreg _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVBUreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVBUreg (MOVWconst [c]))
+       // cond:
+       // result: (MOVWconst [int64(uint8(c))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(uint8(c))
+               return true
+       }
+       return false
+}
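The MOVBUreg rules are zero-extension identities plus constant folding. Written out as ordinary Go (a sketch; zext8 is an illustrative helper, not compiler code):

// zext8 is what MOVBUreg computes on a 32-bit register.
func zext8(x uint32) uint32 { return x & 0xff }

// zext8(x & c)    == x & (c & 0xff)   -- the ANDconst rule
// zext8(zext8(x)) == zext8(x)         -- MOVBUreg of MOVBUreg: the outer extension is dropped
// zext8(c)        == uint32(uint8(c)) -- the MOVWconst rule folds at compile time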
+func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // cond:
+       // result: (MOVBload [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
+       // result: x
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVBstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVBreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVBreg x:(MOVBload _ _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVBload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVBreg (ANDconst [c] x))
+       // cond: c & 0x80 == 0
+       // result: (ANDconst [c&0x7f] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMANDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(c&0x80 == 0) {
+                       break
+               }
+               v.reset(OpARMANDconst)
+               v.AuxInt = c & 0x7f
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVBreg x:(MOVBreg _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVBreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVBreg (MOVWconst [c]))
+       // cond:
+       // result: (MOVWconst [int64(int8(c))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int8(c))
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // cond:
+       // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+       // cond:
+       // result: (MOVBstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVBreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+       // cond:
+       // result: (MOVBstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVBUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+       // cond:
+       // result: (MOVBstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVHreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+       // cond:
+       // result: (MOVBstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVHUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
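The last four MOVBstore rules drop sign and zero extensions feeding a byte store: only the low 8 bits reach memory, so extending the value first cannot change what is stored. A sketch of the identity (storeLow8 is illustrative only):

func storeLow8(p *byte, x uint32) {
	*p = byte(x) // what MOVBstore keeps
	// byte(uint32(uint8(x))) == byte(x) -- a MOVBUreg/MOVHUreg in front of the store is redundant
	// byte(uint32(int8(x)))  == byte(x) -- a MOVBreg/MOVHreg in front of the store is redundant
}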
+func rewriteValueARM_OpARMMOVDload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // cond:
+       // result: (MOVDload [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVDstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVDstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // cond:
+       // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
        // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
        // cond: canMergeSym(sym1,sym2)
-       // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVFload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // cond:
+       // result: (MOVFload [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVFload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVFload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVFstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVFstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // cond:
+       // result: (MOVFstore [off1+off2] {sym} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVFstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVFstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // cond:
+       // result: (MOVHUload [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVHUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVHUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
+       // result: x
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVHstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVHUreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVHUreg x:(MOVBUload _ _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVBUload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHUreg x:(MOVHUload _ _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVHUload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHUreg (ANDconst [c] x))
+       // cond:
+       // result: (ANDconst [c&0xffff] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMANDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c & 0xffff
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHUreg x:(MOVBUreg _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVBUreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHUreg x:(MOVHUreg _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVHUreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHUreg (MOVWconst [c]))
+       // cond:
+       // result: (MOVWconst [int64(uint16(c))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(uint16(c))
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // cond:
+       // result: (MOVHload [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVHload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVHload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
+       // result: x
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVHstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVHreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVHreg x:(MOVBload _ _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVBload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg x:(MOVBUload _ _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVBUload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg x:(MOVHload _ _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVHload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg (ANDconst [c] x))
+       // cond: c & 0x8000 == 0
+       // result: (ANDconst [c&0x7fff] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMANDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(c&0x8000 == 0) {
+                       break
+               }
+               v.reset(OpARMANDconst)
+               v.AuxInt = c & 0x7fff
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg x:(MOVBreg _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVBreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg x:(MOVBUreg _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVBUreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg x:(MOVHreg _))
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               if x.Op != OpARMMOVHreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVHreg (MOVWconst [c]))
+       // cond:
+       // result: (MOVWconst [int64(int16(c))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int16(c))
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // cond:
+       // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVHstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVHstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+       // cond:
+       // result: (MOVHstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVHreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVHstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+       // cond:
+       // result: (MOVHstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVHUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVHstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // cond:
+       // result: (MOVWload [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWload [0] {sym} (ADD ptr idx) mem)
+       // cond: sym == nil
+       // result: (MOVWloadidx ptr idx mem)
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADD {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(sym == nil) {
+                       break
+               }
+               v.reset(OpARMMOVWloadidx)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
+       // cond: sym == nil
+       // result: (MOVWloadshiftLL ptr idx [c] mem)
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDshiftLL {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               c := v_0.AuxInt
+               mem := v.Args[1]
+               if !(sym == nil) {
+                       break
+               }
+               v.reset(OpARMMOVWloadshiftLL)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
+       // cond: sym == nil
+       // result: (MOVWloadshiftRL ptr idx [c] mem)
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDshiftRL {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               c := v_0.AuxInt
+               mem := v.Args[1]
+               if !(sym == nil) {
+                       break
+               }
+               v.reset(OpARMMOVWloadshiftRL)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
+       // cond: sym == nil
+       // result: (MOVWloadshiftRA ptr idx [c] mem)
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDshiftRA {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               c := v_0.AuxInt
+               mem := v.Args[1]
+               if !(sym == nil) {
+                       break
+               }
+               v.reset(OpARMMOVWloadshiftRA)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
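The last four MOVWload rules are where the new indexed and shifted addressing modes come in: a word load whose address is (ADD ptr idx) or (ADDshiftLL/RL/RA ptr idx [c]) and that carries no symbol and a zero offset becomes a single indexed load. The sort of source code that produces such addresses, shown only as an illustration:

func elem(s []uint32, i int) uint32 {
	// The element address is base + i<<2, which these rules can fold
	// into one MOVWloadshiftLL instead of an ADD feeding a MOVWload.
	return s[i]
}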
+func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _))
+       // cond: isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               ptr := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWstoreidx {
+                       break
+               }
+               ptr2 := v_2.Args[0]
+               if idx != v_2.Args[1] {
+                       break
+               }
+               x := v_2.Args[2]
+               if !(isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWloadidx ptr (MOVWconst [c]) mem)
+       // cond:
+       // result: (MOVWload [c] ptr mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               v.reset(OpARMMOVWload)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWloadidx (MOVWconst [c]) ptr mem)
+       // cond:
+       // result: (MOVWload [c] ptr mem)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               ptr := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWload)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWloadidx ptr (SLLconst idx [c]) mem)
+       // cond:
+       // result: (MOVWloadshiftLL ptr idx [c] mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               idx := v_1.Args[0]
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               v.reset(OpARMMOVWloadshiftLL)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWloadidx (SLLconst idx [c]) ptr mem)
+       // cond:
+       // result: (MOVWloadshiftLL ptr idx [c] mem)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
+                       break
+               }
+               idx := v_0.Args[0]
+               c := v_0.AuxInt
+               ptr := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWloadshiftLL)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWloadidx ptr (SRLconst idx [c]) mem)
+       // cond:
+       // result: (MOVWloadshiftRL ptr idx [c] mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               idx := v_1.Args[0]
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               v.reset(OpARMMOVWloadshiftRL)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWloadidx (SRLconst idx [c]) ptr mem)
+       // cond:
+       // result: (MOVWloadshiftRL ptr idx [c] mem)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
+                       break
+               }
+               idx := v_0.Args[0]
+               c := v_0.AuxInt
+               ptr := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWloadshiftRL)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWloadidx ptr (SRAconst idx [c]) mem)
+       // cond:
+       // result: (MOVWloadshiftRA ptr idx [c] mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               idx := v_1.Args[0]
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               v.reset(OpARMMOVWloadshiftRA)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWloadidx (SRAconst idx [c]) ptr mem)
+       // cond:
+       // result: (MOVWloadshiftRA ptr idx [c] mem)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRAconst {
+                       break
+               }
+               idx := v_0.Args[0]
+               c := v_0.AuxInt
+               ptr := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWloadshiftRA)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _))
+       // cond: c==d && isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               ptr := v.Args[0]
+               idx := v.Args[1]
+               c := v.AuxInt
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWstoreshiftLL {
+                       break
+               }
+               ptr2 := v_2.Args[0]
+               if idx != v_2.Args[1] {
+                       break
+               }
+               d := v_2.AuxInt
+               x := v_2.Args[2]
+               if !(c == d && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem)
+       // cond:
+       // result: (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               mem := v.Args[2]
+               v.reset(OpARMMOVWload)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _))
+       // cond: c==d && isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               ptr := v.Args[0]
+               idx := v.Args[1]
+               c := v.AuxInt
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWstoreshiftRA {
+                       break
+               }
+               ptr2 := v_2.Args[0]
+               if idx != v_2.Args[1] {
+                       break
+               }
+               d := v_2.AuxInt
+               x := v_2.Args[2]
+               if !(c == d && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem)
+       // cond:
+       // result: (MOVWload [int64(int32(c)>>uint64(d))] ptr mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               mem := v.Args[2]
+               v.reset(OpARMMOVWload)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _))
+       // cond: c==d && isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               ptr := v.Args[0]
+               idx := v.Args[1]
+               c := v.AuxInt
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWstoreshiftRL {
+                       break
+               }
+               ptr2 := v_2.Args[0]
+               if idx != v_2.Args[1] {
+                       break
+               }
+               d := v_2.AuxInt
+               x := v_2.Args[2]
+               if !(c == d && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem)
+       // cond:
+       // result: (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               mem := v.Args[2]
+               v.reset(OpARMMOVWload)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
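When the index of a shifted load turns out to be a constant, the three MOVWloadshift rules above fold the shift at compile time and fall back to a plain offset load. The offset arithmetic from those rules, copied into ordinary Go as a sketch:

func foldedOffsets(c, d int64) (ll, rl, ra int64) {
	ll = int64(uint32(c) << uint64(d)) // MOVWloadshiftLL: logical left
	rl = int64(uint32(c) >> uint64(d)) // MOVWloadshiftRL: logical right
	ra = int64(int32(c) >> uint64(d))  // MOVWloadshiftRA: arithmetic right
	return
}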
+func rewriteValueARM_OpARMMOVWreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWreg (MOVWconst [c]))
+       // cond:
+       // result: (MOVWconst [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // cond:
+       // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
+       // cond: sym == nil
+       // result: (MOVWstoreidx ptr idx val mem)
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADD {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(sym == nil) {
+                       break
+               }
+               v.reset(OpARMMOVWstoreidx)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
+       // cond: sym == nil
+       // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDshiftLL {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               c := v_0.AuxInt
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(sym == nil) {
+                       break
+               }
+               v.reset(OpARMMOVWstoreshiftLL)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
+       // cond: sym == nil
+       // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDshiftRL {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               c := v_0.AuxInt
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(sym == nil) {
+                       break
+               }
+               v.reset(OpARMMOVWstoreshiftRL)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
+       // cond: sym == nil
+       // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDshiftRA {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               c := v_0.AuxInt
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(sym == nil) {
+                       break
+               }
+               v.reset(OpARMMOVWstoreshiftRA)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
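+// The rules below fold address arithmetic back out of an indexed store: a
+// constant index becomes a plain offset store, and a constant shift of the
+// index becomes one of the shifted-index store ops.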
+func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstoreidx ptr (MOVWconst [c]) val mem)
+       // cond:
+       // result: (MOVWstore [c] ptr val mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstoreidx (MOVWconst [c]) ptr val mem)
+       // cond:
+       // result: (MOVWstore [c] ptr val mem)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               ptr := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem)
+       // cond:
+       // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               idx := v_1.Args[0]
+               c := v_1.AuxInt
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstoreshiftLL)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem)
+       // cond:
+       // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
+                       break
+               }
+               idx := v_0.Args[0]
+               c := v_0.AuxInt
+               ptr := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstoreshiftLL)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem)
+       // cond:
+       // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               idx := v_1.Args[0]
+               c := v_1.AuxInt
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstoreshiftRL)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem)
+       // cond:
+       // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
+                       break
+               }
+               idx := v_0.Args[0]
+               c := v_0.AuxInt
+               ptr := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstoreshiftRL)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem)
+       // cond:
+       // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               idx := v_1.Args[0]
+               c := v_1.AuxInt
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstoreshiftRA)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem)
+       // cond:
+       // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRAconst {
+                       break
+               }
+               idx := v_0.Args[0]
+               c := v_0.AuxInt
+               ptr := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstoreshiftRA)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AuxInt = c
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
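+// For the shifted-index stores, a constant index lets the shift be evaluated
+// at compile time, so the store collapses back to a plain MOVWstore with the
+// shifted constant as its offset. The LL/RA/RL variants below differ only in
+// whether the shift is logical-left, arithmetic-right, or logical-right.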
+func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem)
+       // cond:
+       // result: (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem)
+       // cond:
+       // result: (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem)
+       // cond:
+       // result: (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
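+// Multiplication by a constant is strength-reduced where possible: powers of
+// two become shifts, and constants of the form 2^n±1 (or small multiples of
+// them) become a single shift-and-accumulate op. For example, x*7 becomes
+// (RSBshiftLL x x [3]), i.e. (x<<3)-x, and x*9 becomes (ADDshiftLL x x [3]),
+// i.e. x+(x<<3).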
+func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MUL x (MOVWconst [c]))
+       // cond: int32(c) == -1
+       // result: (RSBconst [0] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(int32(c) == -1) {
+                       break
+               }
+               v.reset(OpARMRSBconst)
+               v.AuxInt = 0
+               v.AddArg(x)
+               return true
+       }
+       // match: (MUL _ (MOVWconst [0]))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               if v_1.AuxInt != 0 {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (MUL x (MOVWconst [1]))
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               if v_1.AuxInt != 1 {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (MUL x (MOVWconst [c]))
+       // cond: isPowerOfTwo(c)
+       // result: (SLLconst [log2(c)] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MUL x (MOVWconst [c]))
+       // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+       // result: (ADDshiftLL x x [log2(c-1)])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+                       break
+               }
+               v.reset(OpARMADDshiftLL)
+               v.AddArg(x)
+               v.AddArg(x)
+               v.AuxInt = log2(c - 1)
+               return true
+       }
+       // match: (MUL x (MOVWconst [c]))
+       // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+       // result: (RSBshiftLL x x [log2(c+1)])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+                       break
+               }
+               v.reset(OpARMRSBshiftLL)
+               v.AddArg(x)
+               v.AddArg(x)
+               v.AuxInt = log2(c + 1)
+               return true
+       }
+       // match: (MUL x (MOVWconst [c]))
+       // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+       // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 3)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = 1
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MUL x (MOVWconst [c]))
+       // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+       // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 5)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = 2
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MUL x (MOVWconst [c]))
+       // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+       // result: (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 7)
+               v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = 3
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MUL x (MOVWconst [c]))
+       // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+       // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 9)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = 3
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: int32(c) == -1
+       // result: (RSBconst [0] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(int32(c) == -1) {
+                       break
+               }
+               v.reset(OpARMRSBconst)
+               v.AuxInt = 0
+               v.AddArg(x)
+               return true
+       }
+       // match: (MUL (MOVWconst [0]) _)
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               if v_0.AuxInt != 0 {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (MUL (MOVWconst [1]) x)
+       // cond:
+       // result: x
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               if v_0.AuxInt != 1 {
+                       break
+               }
+               x := v.Args[1]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: isPowerOfTwo(c)
+       // result: (SLLconst [log2(c)] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+       // result: (ADDshiftLL x x [log2(c-1)])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+                       break
+               }
+               v.reset(OpARMADDshiftLL)
+               v.AddArg(x)
+               v.AddArg(x)
+               v.AuxInt = log2(c - 1)
+               return true
+       }
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+       // result: (RSBshiftLL x x [log2(c+1)])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+                       break
+               }
+               v.reset(OpARMRSBshiftLL)
+               v.AddArg(x)
+               v.AddArg(x)
+               v.AuxInt = log2(c + 1)
+               return true
+       }
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+       // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 3)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = 1
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+       // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 5)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = 2
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+       // result: (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 7)
+               v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = 3
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+       // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 9)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = 3
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MUL (MOVWconst [c]) (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [int64(int32(c*d))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int32(c * d))
+               return true
+       }
+       return false
+}
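+// MULA x y a computes x*y + a. The multiply is strength-reduced with the same
+// patterns as MUL, with the accumulator added on afterwards; a multiply by
+// -1, 0, or 1 degenerates to a subtract, the accumulator itself, or a plain
+// add.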
+func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: int32(c) == -1
+       // result: (SUB a x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(int32(c) == -1) {
+                       break
+               }
+               v.reset(OpARMSUB)
+               v.AddArg(a)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULA _ (MOVWconst [0]) a)
+       // cond:
+       // result: a
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               if v_1.AuxInt != 0 {
+                       break
+               }
+               a := v.Args[2]
+               v.reset(OpCopy)
+               v.Type = a.Type
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA x (MOVWconst [1]) a)
+       // cond:
+       // result: (ADD x a)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               if v_1.AuxInt != 1 {
+                       break
+               }
+               a := v.Args[2]
+               v.reset(OpARMADD)
+               v.AddArg(x)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: isPowerOfTwo(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+       // result: (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = log2(c - 1)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+       // result: (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = log2(c + 1)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 3)
+               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v1.AuxInt = 1
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 5)
+               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v1.AuxInt = 2
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 7)
+               v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v1.AuxInt = 3
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 9)
+               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v1.AuxInt = 3
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: int32(c) == -1
+       // result: (SUB a x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(int32(c) == -1) {
+                       break
+               }
+               v.reset(OpARMSUB)
+               v.AddArg(a)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULA (MOVWconst [0]) _ a)
+       // cond:
+       // result: a
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               if v_0.AuxInt != 0 {
+                       break
+               }
+               a := v.Args[2]
+               v.reset(OpCopy)
+               v.Type = a.Type
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA (MOVWconst [1]) x a)
+       // cond:
+       // result: (ADD x a)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               if v_0.AuxInt != 1 {
+                       break
+               }
+               x := v.Args[1]
+               a := v.Args[2]
+               v.reset(OpARMADD)
+               v.AddArg(x)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: isPowerOfTwo(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+       // result: (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = log2(c - 1)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+       // result: (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = log2(c + 1)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 3)
+               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v1.AuxInt = 1
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 5)
+               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v1.AuxInt = 2
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 7)
+               v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v1.AuxInt = 3
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 9)
+               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v1.AuxInt = 3
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
+               return true
+       }
+       // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a)
+       // cond:
+       // result: (ADDconst [int64(int32(c*d))] a)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_1.AuxInt
+               a := v.Args[2]
+               v.reset(OpARMADDconst)
+               v.AuxInt = int64(int32(c * d))
+               v.AddArg(a)
+               return true
+       }
+       return false
+}
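+// MVN is bitwise NOT. A constant operand folds to a constant result, and a
+// shifted operand folds the shift into the MVN itself, either by a constant
+// amount (MVNshiftLL/RA/RL) or by a register amount (the *reg forms).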
+func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVN (MOVWconst [c]))
+       // cond:
+       // result: (MOVWconst [^c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = ^c
+               return true
+       }
+       // match: (MVN (SLLconst [c] x))
+       // cond:
+       // result: (MVNshiftLL x [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMMVNshiftLL)
+               v.AddArg(x)
+               v.AuxInt = c
+               return true
+       }
+       // match: (MVN (SRLconst [c] x))
+       // cond:
+       // result: (MVNshiftRL x [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMMVNshiftRL)
+               v.AddArg(x)
+               v.AuxInt = c
+               return true
+       }
+       // match: (MVN (SRAconst [c] x))
+       // cond:
+       // result: (MVNshiftRA x [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMMVNshiftRA)
+               v.AddArg(x)
+               v.AuxInt = c
+               return true
+       }
+       // match: (MVN (SLL x y))
+       // cond:
+       // result: (MVNshiftLLreg x y)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLL {
+                       break
+               }
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               v.reset(OpARMMVNshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+       // match: (MVN (SRL x y))
+       // cond:
+       // result: (MVNshiftRLreg x y)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRL {
+                       break
+               }
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               v.reset(OpARMMVNshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+       // match: (MVN (SRA x y))
+       // cond:
+       // result: (MVNshiftRAreg x y)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRA {
+                       break
+               }
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               v.reset(OpARMMVNshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMVNshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVNshiftLL (MOVWconst [c]) [d])
+       // cond:
+       // result: (MOVWconst [^int64(uint32(c)<<uint64(d))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = ^int64(uint32(c) << uint64(d))
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMVNshiftLLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVNshiftLLreg x (MOVWconst [c]))
+       // cond:
+       // result: (MVNshiftLL x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMMVNshiftLL)
+               v.AddArg(x)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMVNshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVNshiftRA (MOVWconst [c]) [d])
+       // cond:
+       // result: (MOVWconst [^int64(int32(c)>>uint64(d))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = ^int64(int32(c) >> uint64(d))
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMVNshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVNshiftRAreg x (MOVWconst [c]))
+       // cond:
+       // result: (MVNshiftRA x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMMVNshiftRA)
+               v.AddArg(x)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMVNshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVNshiftRL (MOVWconst [c]) [d])
+       // cond:
+       // result: (MOVWconst [^int64(uint32(c)>>uint64(d))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = ^int64(uint32(c) >> uint64(d))
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMVNshiftRLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVNshiftRLreg x (MOVWconst [c]))
+       // cond:
+       // result: (MVNshiftRL x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMMVNshiftRL)
+               v.AddArg(x)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
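+// The generic modulus ops lower to the machine MOD/MODU, sign- or
+// zero-extending sub-word operands to 32 bits first.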
+func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16 x y)
+       // cond:
+       // result: (MOD (SignExt16to32 x) (SignExt16to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMOD)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16u x y)
+       // cond:
+       // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMODU)
+               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32 x y)
+       // cond:
+       // result: (MOD x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMOD)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32u x y)
+       // cond:
+       // result: (MODU x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMODU)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8 x y)
+       // cond:
+       // result: (MOD (SignExt8to32 x) (SignExt8to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMOD)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8u x y)
+       // cond:
+       // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMODU)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
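+// Move (a memory copy of known size) is lowered by size and alignment: copies
+// of up to 4 bytes expand into individual loads and stores, aligned multiples
+// of 4 up to 512 bytes use DUFFCOPY, larger aligned copies use the
+// LoweredMove loop, and unaligned copies larger than 4 bytes use LoweredMoveU.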
+func rewriteValueARM_OpMove(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Move [s] _ _ mem)
+       // cond: SizeAndAlign(s).Size() == 0
+       // result: mem
+       for {
+               s := v.AuxInt
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 0) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = mem.Type
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 1
+       // result: (MOVBstore dst (MOVBUload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 1) {
+                       break
+               }
+               v.reset(OpARMMOVBstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore dst (MOVHUload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpARMMOVHstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2
+       // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 2) {
+                       break
+               }
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = 1
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v0.AuxInt = 1
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore dst (MOVWload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpARMMOVWstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpARMMOVHstore)
+               v.AuxInt = 2
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+               v0.AuxInt = 2
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4
+       // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4) {
+                       break
+               }
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = 3
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v0.AuxInt = 3
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+               v1.AuxInt = 2
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v2.AuxInt = 2
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+               v3.AuxInt = 1
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v4.AuxInt = 1
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+               v5.AddArg(dst)
+               v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v6.AddArg(src)
+               v6.AddArg(mem)
+               v5.AddArg(v6)
+               v5.AddArg(mem)
+               v3.AddArg(v5)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 3
+       // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 3) {
+                       break
+               }
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = 2
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v0.AuxInt = 2
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+               v1.AuxInt = 1
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v2.AuxInt = 1
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0
+       // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpARMDUFFCOPY)
+               v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4))
+               v.AddArg(dst)
+               v.AddArg(src)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0
+       // result: (LoweredMove dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpARMLoweredMove)
+               v.AddArg(dst)
+               v.AddArg(src)
+               v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
+               v0.AddArg(src)
+               v0.AuxInt = SizeAndAlign(s).Size()
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0
+       // result: (LoweredMoveU dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0) {
+                       break
+               }
+               v.reset(OpARMLoweredMoveU)
+               v.AddArg(dst)
+               v.AddArg(src)
+               v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
+               v0.AddArg(src)
+               v0.AuxInt = SizeAndAlign(s).Size()
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
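The DUFFCOPY case above enters runtime·duffcopy at byte offset 8 * (128 - size/4). A minimal sketch of that arithmetic (editorial illustration, not part of the CL), assuming duffcopy on ARM is 128 copy units of 8 bytes of code each, one post-indexed 4-byte load plus one store per unit:

package main

import "fmt"

// duffCopyEntryOffset returns the byte offset into duffcopy at which to enter
// so that exactly size/4 word copies run before the trailing RET.
func duffCopyEntryOffset(size int64) int64 {
	words := size / 4        // 4-byte copy units needed
	return 8 * (128 - words) // skip the unused leading units, 8 bytes apiece
}

func main() {
	fmt.Println(duffCopyEntryOffset(8))   // 1008: only the last 2 units run
	fmt.Println(duffCopyEntryOffset(512)) // 0: all 128 units run
}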
+func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul16 x y)
+       // cond:
+       // result: (MUL x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMUL)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32 x y)
+       // cond:
+       // result: (MUL x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMUL)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32F x y)
+       // cond:
+       // result: (MULF x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMULF)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32uhilo x y)
+       // cond:
+       // result: (MULLU x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMULLU)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul64F x y)
+       // cond:
+       // result: (MULD x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMULD)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul8 x y)
+       // cond:
+       // result: (MUL x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMUL)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg16 x)
+       // cond:
+       // result: (RSBconst [0] x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = 0
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32 x)
+       // cond:
+       // result: (RSBconst [0] x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = 0
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32F x)
+       // cond:
+       // result: (MULF (MOVFconst [int64(math.Float64bits(-1))]) x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARMMULF)
+               v0 := b.NewValue0(v.Line, OpARMMOVFconst, config.fe.TypeFloat32())
+               v0.AuxInt = int64(math.Float64bits(-1))
+               v.AddArg(v0)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg64F x)
+       // cond:
+       // result: (MULD (MOVDconst [int64(math.Float64bits(-1))]) x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARMMULD)
+               v0 := b.NewValue0(v.Line, OpARMMOVDconst, config.fe.TypeFloat64())
+               v0.AuxInt = int64(math.Float64bits(-1))
+               v.AddArg(v0)
+               v.AddArg(x)
+               return true
+       }
+}
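In the Neg32F/Neg64F cases above, the -1 constant travels through the integer AuxInt as its float64 bit pattern. A minimal round-trip sketch of that encoding (editorial, not part of the CL):

package main

import (
	"fmt"
	"math"
)

func main() {
	aux := int64(math.Float64bits(-1)) // encode -1.0, as in the rules above
	c := math.Float64frombits(uint64(aux))
	fmt.Println(c * 3.5) // -3.5: multiplying by the decoded constant negates
}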
+func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg8 x)
+       // cond:
+       // result: (RSBconst [0] x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = 0
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq16 x y)
+       // cond:
+       // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq32 x y)
+       // cond:
+       // result: (NotEqual (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq32F x y)
+       // cond:
+       // result: (NotEqual (CMPF x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq64F x y)
+       // cond:
+       // result: (NotEqual (CMPD x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq8 x y)
+       // cond:
+       // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NeqB x y)
+       // cond:
+       // result: (XOR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMXOR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NeqPtr x y)
+       // cond:
+       // result: (NotEqual (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NilCheck ptr mem)
+       // cond:
+       // result: (LoweredNilCheck ptr mem)
+       for {
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMLoweredNilCheck)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueARM_OpNot(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Not x)
+       // cond:
+       // result: (XORconst [1] x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARMXORconst)
+               v.AuxInt = 1
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NotEqual (FlagEQ))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (NotEqual (FlagLT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (NotEqual (FlagLT_UGT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (NotEqual (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (NotEqual (FlagGT_UGT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (NotEqual (InvertFlags x))
+       // cond:
+       // result: (NotEqual x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMNotEqual)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OR (MOVWconst [c]) x)
+       // cond:
+       // result: (ORconst [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (OR x (MOVWconst [c]))
+       // cond:
+       // result: (ORconst [c] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (OR x (SLLconst [c] y))
+       // cond:
+       // result: (ORshiftLL x y [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMORshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (OR (SLLconst [c] y) x)
+       // cond:
+       // result: (ORshiftLL x y [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMORshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (OR x (SRLconst [c] y))
+       // cond:
+       // result: (ORshiftRL x y [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMORshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (OR (SRLconst [c] y) x)
+       // cond:
+       // result: (ORshiftRL x y [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMORshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (OR x (SRAconst [c] y))
+       // cond:
+       // result: (ORshiftRA x y [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMORshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (OR (SRAconst [c] y) x)
+       // cond:
+       // result: (ORshiftRA x y [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMORshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (OR x (SLL y z))
+       // cond:
+       // result: (ORshiftLLreg x y z)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMORshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (OR (SLL y z) x)
+       // cond:
+       // result: (ORshiftLLreg x y z)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMORshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (OR x (SRL y z))
+       // cond:
+       // result: (ORshiftRLreg x y z)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMORshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (OR (SRL y z) x)
+       // cond:
+       // result: (ORshiftRLreg x y z)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMORshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (OR x (SRA y z))
+       // cond:
+       // result: (ORshiftRAreg x y z)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMORshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (OR (SRA y z) x)
+       // cond:
+       // result: (ORshiftRAreg x y z)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRA {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMORshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (OR x x)
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORconst [0] x)
+       // cond:
+       // result: x
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (ORconst [c] _)
+       // cond: int32(c)==-1
+       // result: (MOVWconst [-1])
+       for {
+               c := v.AuxInt
+               if !(int32(c) == -1) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = -1
+               return true
+       }
+       // match: (ORconst [c] (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [c|d])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c | d
+               return true
+       }
+       // match: (ORconst [c] (ORconst [d] x))
+       // cond:
+       // result: (ORconst [c|d] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMORconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMORconst)
+               v.AuxInt = c | d
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORshiftLL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (ORconst [c] (SLLconst <x.Type> x [d]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               return true
+       }
+       // match: (ORshiftLL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (ORconst x [int64(uint32(c)<<uint64(d))])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMORconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               return true
+       }
+       // match: (ORshiftLL x y:(SLLconst x [c]) [d])
+       // cond: c==d
+       // result: y
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               if y.Op != OpARMSLLconst {
+                       break
+               }
+               if x != y.Args[0] {
+                       break
+               }
+               c := y.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = y.Type
+               v.AddArg(y)
+               return true
+       }
+       return false
+}
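The second ORshiftLL case above folds a constant shifted operand into an ORconst. A minimal sketch of that folding (editorial, not part of the CL): the shift is applied to the truncated 32-bit constant, so bits shifted past bit 31 are discarded, and the result is then widened back into the int64 AuxInt:

package main

import "fmt"

// foldShiftLL mirrors the AuxInt expression in the rewrite above:
// int64(uint32(c) << uint64(d)).
func foldShiftLL(c, d int64) int64 {
	return int64(uint32(c) << uint64(d))
}

func main() {
	fmt.Printf("%#x\n", foldShiftLL(0x1, 4))        // 0x10
	fmt.Printf("%#x\n", foldShiftLL(0xffffffff, 8)) // 0xffffff00: bits above 31 drop out
}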
+func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORshiftLLreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (ORconst [c] (SLL <x.Type> x y))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (ORshiftLLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (ORshiftLL x y [c])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMORshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORshiftRA (MOVWconst [c]) x [d])
+       // cond:
+       // result: (ORconst [c] (SRAconst <x.Type> x [d]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               return true
+       }
+       // match: (ORshiftRA x (MOVWconst [c]) [d])
+       // cond:
+       // result: (ORconst x [int64(int32(c)>>uint64(d))])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMORconst)
+               v.AddArg(x)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               return true
+       }
+       // match: (ORshiftRA x y:(SRAconst x [c]) [d])
+       // cond: c==d
+       // result: y
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               if y.Op != OpARMSRAconst {
+                       break
+               }
+               if x != y.Args[0] {
+                       break
+               }
+               c := y.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = y.Type
+               v.AddArg(y)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORshiftRAreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (ORconst [c] (SRA <x.Type> x y))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (ORshiftRAreg x y (MOVWconst [c]))
+       // cond:
+       // result: (ORshiftRA x y [c])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMORshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORshiftRL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               return true
+       }
+       // match: (ORshiftRL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (ORconst x [int64(uint32(c)>>uint64(d))])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMORconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               return true
+       }
+       // match: (ORshiftRL x y:(SRLconst x [c]) [d])
+       // cond: c==d
+       // result: y
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               if y.Op != OpARMSRLconst {
+                       break
+               }
+               if x != y.Args[0] {
+                       break
+               }
+               c := y.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = y.Type
+               v.AddArg(y)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMORshiftRLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORshiftRLreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (ORconst [c] (SRL <x.Type> x y))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (ORshiftRLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (ORshiftRL x y [c])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMORshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OffPtr [off] ptr:(SP))
+       // cond:
+       // result: (MOVWaddr [off] ptr)
+       for {
+               off := v.AuxInt
+               ptr := v.Args[0]
+               if ptr.Op != OpSP {
+                       break
+               }
+               v.reset(OpARMMOVWaddr)
+               v.AuxInt = off
+               v.AddArg(ptr)
+               return true
+       }
+       // match: (OffPtr [off] ptr)
+       // cond:
+       // result: (ADDconst [off] ptr)
+       for {
+               off := v.AuxInt
+               ptr := v.Args[0]
+               v.reset(OpARMADDconst)
+               v.AuxInt = off
+               v.AddArg(ptr)
+               return true
+       }
+}
+func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or16 x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMOR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or32 x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMOR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or8 x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMOR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OrB x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMOR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSB (MOVWconst [c]) x)
+       // cond:
+       // result: (SUBconst [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (RSB x (MOVWconst [c]))
+       // cond:
+       // result: (RSBconst [c] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMRSBconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (RSB x (SLLconst [c] y))
+       // cond:
+       // result: (RSBshiftLL x y [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMRSBshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (RSB (SLLconst [c] y) x)
+       // cond:
+       // result: (SUBshiftLL x y [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMSUBshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (RSB x (SRLconst [c] y))
+       // cond:
+       // result: (RSBshiftRL x y [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMRSBshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (RSB (SRLconst [c] y) x)
+       // cond:
+       // result: (SUBshiftRL x y [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMSUBshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (RSB x (SRAconst [c] y))
+       // cond:
+       // result: (RSBshiftRA x y [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMRSBshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (RSB (SRAconst [c] y) x)
+       // cond:
+       // result: (SUBshiftRA x y [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMSUBshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       // match: (RSB x (SLL y z))
+       // cond:
+       // result: (RSBshiftLLreg x y z)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMRSBshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (RSB (SLL y z) x)
+       // cond:
+       // result: (SUBshiftLLreg x y z)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMSUBshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (RSB x (SRL y z))
+       // cond:
+       // result: (RSBshiftRLreg x y z)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMRSBshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (RSB (SRL y z) x)
+       // cond:
+       // result: (SUBshiftRLreg x y z)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMSUBshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (RSB x (SRA y z))
+       // cond:
+       // result: (RSBshiftRAreg x y z)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMRSBshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (RSB (SRA y z) x)
+       // cond:
+       // result: (SUBshiftRAreg x y z)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRA {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMSUBshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (RSB x x)
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               x := v.Args[0]
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       return false
+}
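A note on the operand swaps in the RSB lowering above: consistent with the constant cases, (RSB a b) computes b - a, so a shifted operand in the first position flips the op to its SUB form, e.g. (RSB (SLLconst [c] y) x) = x - (y<<c) = (SUBshiftLL x y [c]), while (RSB x (SLLconst [c] y)) = (y<<c) - x = (RSBshiftLL x y [c]); (RSB x x) therefore folds to 0.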
+func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBSshiftLL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBSshiftLL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (RSBSconst x [int64(uint32(c)<<uint64(d))])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMRSBSconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBSshiftLLreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (SUBSconst [c] (SLL <x.Type> x y))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBSshiftLLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (RSBSshiftLL x y [c])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMRSBSshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBSshiftRA (MOVWconst [c]) x [d])
+       // cond:
+       // result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBSshiftRA x (MOVWconst [c]) [d])
+       // cond:
+       // result: (RSBSconst x [int64(int32(c)>>uint64(d))])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMRSBSconst)
+               v.AddArg(x)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBSshiftRAreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (SUBSconst [c] (SRA <x.Type> x y))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBSshiftRAreg x y (MOVWconst [c]))
+       // cond:
+       // result: (RSBSshiftRA x y [c])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMRSBSshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBSshiftRL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBSshiftRL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (RSBSconst x [int64(uint32(c)>>uint64(d))])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMRSBSconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBSshiftRLreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (SUBSconst [c] (SRL <x.Type> x y))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBSshiftRLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (RSBSshiftRL x y [c])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMRSBSshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBconst [c] (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [int64(int32(c-d))])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int32(c - d))
+               return true
+       }
+       // match: (RSBconst [c] (RSBconst [d] x))
+       // cond:
+       // result: (ADDconst [int64(int32(c-d))] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMRSBconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMADDconst)
+               v.AuxInt = int64(int32(c - d))
+               v.AddArg(x)
+               return true
+       }
+       // match: (RSBconst [c] (ADDconst [d] x))
+       // cond:
+       // result: (RSBconst [int64(int32(c-d))] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = int64(int32(c - d))
+               v.AddArg(x)
+               return true
+       }
+       // match: (RSBconst [c] (SUBconst [d] x))
+       // cond:
+       // result: (RSBconst [int64(int32(c+d))] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSUBconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = int64(int32(c + d))
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
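The RSBconst combinations above follow from RSBconst [k] x = k - x, with each result truncated to 32 bits as in the generated code: c - (d - x) = (c - d) + x gives ADDconst [c-d] x; c - (x + d) = (c - d) - x gives RSBconst [c-d] x; and c - (x - d) = (c + d) - x gives RSBconst [c+d] x.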
+func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBshiftLL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (SUBconst [c] (SLLconst <x.Type> x [d]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBshiftLL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (RSBconst x [int64(uint32(c)<<uint64(d))])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMRSBconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               return true
+       }
+       // match: (RSBshiftLL x (SLLconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               if x != v_1.Args[0] {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBshiftLLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBshiftLLreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (SUBconst [c] (SLL <x.Type> x y))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBshiftLLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (RSBshiftLL x y [c])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMRSBshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBshiftRA (MOVWconst [c]) x [d])
+       // cond:
+       // result: (SUBconst [c] (SRAconst <x.Type> x [d]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBshiftRA x (MOVWconst [c]) [d])
+       // cond:
+       // result: (RSBconst x [int64(int32(c)>>uint64(d))])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMRSBconst)
+               v.AddArg(x)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               return true
+       }
+       // match: (RSBshiftRA x (SRAconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               if x != v_1.Args[0] {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBshiftRAreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (SUBconst [c] (SRA <x.Type> x y))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBshiftRAreg x y (MOVWconst [c]))
+       // cond:
+       // result: (RSBshiftRA x y [c])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMRSBshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBshiftRL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (SUBconst [c] (SRLconst <x.Type> x [d]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBshiftRL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (RSBconst x [int64(uint32(c)>>uint64(d))])
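+       // e.g. with c=0x80000000 and d=4 the logical shift folds to
+       // uint32(0x80000000)>>4 = 0x08000000, giving (RSBconst x [0x08000000]).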
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMRSBconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               return true
+       }
+       // match: (RSBshiftRL x (SRLconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               if x != v_1.Args[0] {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBshiftRLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBshiftRLreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (SUBconst [c] (SRL <x.Type> x y))
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBshiftRLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (RSBshiftRL x y [c])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_2.AuxInt
+               v.reset(OpARMRSBshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMOVFload(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (RSCconst [c] (ADDconst [d] x) flags)
        // cond:
-       // result: (MOVFload [off1+off2] {sym} ptr mem)
+       // result: (RSCconst [int64(int32(c-d))] x flags)
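+       // c - (x+d) equals (c-d) - x with the same borrow input, so e.g. with
+       // c=10 and d=3 the constant folds to int32(10-3) = 7 and the inner
+       // ADDconst disappears.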
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMADDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVFload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               flags := v.Args[1]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = int64(int32(c - d))
+               v.AddArg(x)
+               v.AddArg(flags)
                return true
        }
-       // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (RSCconst [c] (SUBconst [d] x) flags)
+       // cond:
+       // result: (RSCconst [int64(int32(c+d))] x flags)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSUBconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               flags := v.Args[1]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = int64(int32(c + d))
+               v.AddArg(x)
+               v.AddArg(flags)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSCshiftLL (MOVWconst [c]) x [d] flags)
+       // cond:
+       // result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (RSCshiftLL x (MOVWconst [c]) [d] flags)
+       // cond:
+       // result: (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
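+       // e.g. with c=3 and d=5 the shifted constant folds to 3<<5 = 96,
+       // giving (RSCconst x [96] flags).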
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(flags)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSCshiftLLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSCshiftLLreg (MOVWconst [c]) x y flags)
+       // cond:
+       // result: (SBCconst [c] (SLL <x.Type> x y) flags)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
+       // cond:
+       // result: (RSCshiftLL x y [c] flags)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMRSCshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSCshiftRA (MOVWconst [c]) x [d] flags)
+       // cond:
+       // result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (RSCshiftRA x (MOVWconst [c]) [d] flags)
+       // cond:
+       // result: (RSCconst x [int64(int32(c)>>uint64(d))] flags)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AddArg(x)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(flags)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSCshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSCshiftRAreg (MOVWconst [c]) x y flags)
+       // cond:
+       // result: (SBCconst [c] (SRA <x.Type> x y) flags)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
+       // cond:
+       // result: (RSCshiftRA x y [c] flags)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMRSCshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSCshiftRL (MOVWconst [c]) x [d] flags)
+       // cond:
+       // result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVFload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-       // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-       // result: x
+       // match: (RSCshiftRL x (MOVWconst [c]) [d] flags)
+       // cond:
+       // result: (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVFstore {
-                       break
-               }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               c := v_1.AuxInt
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
                v.AddArg(x)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(flags)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMOVFstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSCshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (RSCshiftRLreg (MOVWconst [c]) x y flags)
        // cond:
-       // result: (MOVFstore [off1+off2] {sym} ptr val mem)
+       // result: (SBCconst [c] (SRL <x.Type> x y) flags)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVFstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-       // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
+       // cond:
+       // result: (RSCshiftRL x y [c] flags)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVFstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMRSCshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (Rsh16Ux16 x y)
        // cond:
-       // result: (MOVHUload [off1+off2] {sym} ptr mem)
+       // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
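+       // The (CMPconst [256] (ZeroExt16to32 y)) guard makes CMOVWHSconst replace
+       // the SRL result with the constant 0 once the shift amount is 256 or more,
+       // i.e. once it no longer fits in the byte consulted by the register shift.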
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVHUload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v3.AuxInt = 256
+               v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+}
+func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux32 x y)
+       // cond:
+       // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v2.AddArg(y)
+               v.AddArg(v2)
+               v.AuxInt = 0
+               return true
+       }
+}
+func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux64 x (Const64 [c]))
+       // cond: uint64(c) < 16
+       // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
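+       // The 16-bit value sits in the low half of a 32-bit register: shifting it
+       // up by 16 and logically right by c+16 discards whatever is in the upper
+       // half and performs the unsigned shift with two constant shifts;
+       // e.g. c=3 yields (SRLconst (SLLconst x [16]) [19]).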
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               if !(uint64(c) < 16) {
                        break
                }
-               v.reset(OpARMMOVHUload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARMSRLconst)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v0.AuxInt = 16
+               v.AddArg(v0)
+               v.AuxInt = c + 16
                return true
        }
-       // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
-       // result: x
+       // match: (Rsh16Ux64 _ (Const64 [c]))
+       // cond: uint64(c) >= 16
+       // result: (Const16 [0])
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVHstore {
+               if v_1.Op != OpConst64 {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
+               c := v_1.AuxInt
+               if !(uint64(c) >= 16) {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               v.reset(OpConst16)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMOVHUreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVHUreg x:(MOVBUload _ _))
+       // match: (Rsh16Ux8  x y)
        // cond:
-       // result: (MOVWreg x)
+       // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
        for {
                x := v.Args[0]
-               if x.Op != OpARMMOVBUload {
-                       break
-               }
-               v.reset(OpARMMOVWreg)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARMSRL)
+               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (MOVHUreg x:(MOVHUload _ _))
+}
+func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x16 x y)
        // cond:
-       // result: (MOVWreg x)
+       // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
-               if x.Op != OpARMMOVHUload {
-                       break
-               }
-               v.reset(OpARMMOVWreg)
-               v.AddArg(x)
-               return true
-       }
-       // match: (MOVHUreg (ANDconst [c] x))
-       // cond:
-       // result: (ANDconst [c&0xffff] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMANDconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMANDconst)
-               v.AuxInt = c & 0xffff
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARMSRAcond)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v.AddArg(v2)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (Rsh16x32 x y)
        // cond:
-       // result: (MOVHload [off1+off2] {sym} ptr mem)
+       // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVHload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRAcond)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+}
+func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x64 x (Const64 [c]))
+       // cond: uint64(c) < 16
+       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               if !(uint64(c) < 16) {
                        break
                }
-               v.reset(OpARMMOVHload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARMSRAconst)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v0.AuxInt = 16
+               v.AddArg(v0)
+               v.AuxInt = c + 16
                return true
        }
-       // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
-       // result: x
+       // match: (Rsh16x64 x (Const64 [c]))
+       // cond: uint64(c) >= 16
+       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
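+       // A signed 16-bit shift by 16 or more leaves only copies of the sign bit,
+       // so the value is moved to the top half and arithmetically shifted by 31,
+       // producing 0 or -1.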
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVHstore {
+               if v_1.Op != OpConst64 {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
+               c := v_1.AuxInt
+               if !(uint64(c) >= 16) {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               v.reset(OpARMSRAconst)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v0.AuxInt = 16
+               v.AddArg(v0)
+               v.AuxInt = 31
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMOVHreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVHreg x:(MOVBload _ _))
+       // match: (Rsh16x8  x y)
        // cond:
-       // result: (MOVWreg x)
+       // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
        for {
                x := v.Args[0]
-               if x.Op != OpARMMOVBload {
-                       break
-               }
-               v.reset(OpARMMOVWreg)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARMSRA)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (MOVHreg x:(MOVBUload _ _))
+}
+func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux16 x y)
        // cond:
-       // result: (MOVWreg x)
+       // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
        for {
                x := v.Args[0]
-               if x.Op != OpARMMOVBUload {
-                       break
-               }
-               v.reset(OpARMMOVWreg)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v.AddArg(v2)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVHreg x:(MOVHload _ _))
+}
+func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux32 x y)
        // cond:
-       // result: (MOVWreg x)
+       // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
        for {
                x := v.Args[0]
-               if x.Op != OpARMMOVHload {
-                       break
-               }
-               v.reset(OpARMMOVWreg)
-               v.AddArg(x)
-               return true
-       }
-       // match: (MOVHreg (ANDconst [c] x))
-       // cond: c & 0x8000 == 0
-       // result: (ANDconst [c&0x7fff] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMANDconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               if !(c&0x8000 == 0) {
-                       break
-               }
-               v.reset(OpARMANDconst)
-               v.AuxInt = c & 0x7fff
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v1.AddArg(y)
+               v.AddArg(v1)
+               v.AuxInt = 0
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-       // cond:
-       // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+       // match: (Rsh32Ux64 x (Const64 [c]))
+       // cond: uint64(c) < 32
+       // result: (SRLconst x [c])
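+       // e.g. a shift by the constant 5 becomes (SRLconst x [5]); amounts of 32
+       // or more are handled by the next rule, which yields the constant 0.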
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVHstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               if !(uint64(c) < 32) {
+                       break
+               }
+               v.reset(OpARMSRLconst)
+               v.AddArg(x)
+               v.AuxInt = c
                return true
        }
-       // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (Rsh32Ux64 _ (Const64 [c]))
+       // cond: uint64(c) >= 32
+       // result: (Const32 [0])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               if !(uint64(c) >= 32) {
                        break
                }
-               v.reset(OpARMMOVHstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpConst32)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+       return false
+}
+func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux8  x y)
        // cond:
-       // result: (MOVHstore [off] {sym} ptr x mem)
+       // result: (SRL x (ZeroExt8to32 y))
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVHreg {
-                       break
-               }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpARMMOVHstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRL)
                v.AddArg(x)
-               v.AddArg(mem)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+}
+func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32x16 x y)
        // cond:
-       // result: (MOVHstore [off] {sym} ptr x mem)
+       // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVHUreg {
-                       break
-               }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpARMMOVHstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRAcond)
                v.AddArg(x)
-               v.AddArg(mem)
+               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (Rsh32x32 x y)
        // cond:
-       // result: (MOVWload [off1+off2] {sym} ptr mem)
+       // result: (SRAcond x y (CMPconst [256] y))
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVWload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRAcond)
+               v.AddArg(x)
+               v.AddArg(y)
+               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v0.AuxInt = 256
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+}
+func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32x64 x (Const64 [c]))
+       // cond: uint64(c) < 32
+       // result: (SRAconst x [c])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               if !(uint64(c) < 32) {
                        break
                }
-               v.reset(OpARMMOVWload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARMSRAconst)
+               v.AddArg(x)
+               v.AuxInt = c
                return true
        }
-       // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-       // result: x
+       // match: (Rsh32x64 x (Const64 [c]))
+       // cond: uint64(c) >= 32
+       // result: (SRAconst x [31])
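+       // e.g. a constant shift by 40 is clamped to (SRAconst x [31]), which
+       // smears the sign bit across the word and yields 0 or -1.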
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWstore {
+               if v_1.Op != OpConst64 {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+               c := v_1.AuxInt
+               if !(uint64(c) >= 32) {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               v.reset(OpARMSRAconst)
                v.AddArg(x)
+               v.AuxInt = 31
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (Rsh32x8  x y)
        // cond:
-       // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+       // result: (SRA x (ZeroExt8to32 y))
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRA)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux16 x y)
+       // cond:
+       // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v3.AuxInt = 256
+               v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               v.AuxInt = 0
+               return true
+       }
+}
+func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux32 x y)
+       // cond:
+       // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v2.AddArg(y)
+               v.AddArg(v2)
+               v.AuxInt = 0
+               return true
+       }
+}
+func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux64 x (Const64 [c]))
+       // cond: uint64(c) < 8
+       // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
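+       // Same trick as Rsh16Ux64 with an 8-bit value: e.g. c=2 yields
+       // (SRLconst (SLLconst x [24]) [26]).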
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               if !(uint64(c) < 8) {
+                       break
+               }
+               v.reset(OpARMSRLconst)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v0.AuxInt = 24
+               v.AddArg(v0)
+               v.AuxInt = c + 24
                return true
        }
-       // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (Rsh8Ux64 _ (Const64 [c]))
+       // cond: uint64(c) >= 8
+       // result: (Const8 [0])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               if !(uint64(c) >= 8) {
                        break
                }
-               v.reset(OpARMMOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpConst8)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MUL x (MOVWconst [-1]))
+       // match: (Rsh8Ux8  x y)
        // cond:
-       // result: (RSBconst [0] x)
+       // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_1.AuxInt != -1 {
-                       break
-               }
-               v.reset(OpARMRSBconst)
-               v.AuxInt = 0
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARMSRL)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (MUL _ (MOVWconst [0]))
+}
+func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x16 x y)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_1.AuxInt != 0 {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRAcond)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v.AddArg(v2)
                return true
        }
-       // match: (MUL x (MOVWconst [1]))
+}
+func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x32 x y)
        // cond:
-       // result: x
+       // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_1.AuxInt != 1 {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARMSRAcond)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (MUL x (MOVWconst [c]))
-       // cond: isPowerOfTwo(c)
-       // result: (SLLconst [log2(c)] x)
+}
+func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x64 x (Const64 [c]))
+       // cond: uint64(c) < 8
+       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               if !(isPowerOfTwo(c)) {
+               if !(uint64(c) < 8) {
                        break
                }
-               v.reset(OpARMSLLconst)
-               v.AuxInt = log2(c)
-               v.AddArg(x)
+               v.reset(OpARMSRAconst)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v0.AuxInt = 24
+               v.AddArg(v0)
+               v.AuxInt = c + 24
                return true
        }
-       // match: (MUL (MOVWconst [-1]) x)
-       // cond:
-       // result: (RSBconst [0] x)
+       // match: (Rsh8x64 x (Const64 [c]))
+       // cond: uint64(c) >= 8
+       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               if v_0.AuxInt != -1 {
+               c := v_1.AuxInt
+               if !(uint64(c) >= 8) {
                        break
                }
-               x := v.Args[1]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = 0
-               v.AddArg(x)
+               v.reset(OpARMSRAconst)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v0.AuxInt = 24
+               v.AddArg(v0)
+               v.AuxInt = 31
                return true
        }
-       // match: (MUL (MOVWconst [0]) _)
+       return false
+}
+func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x8  x y)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_0.AuxInt != 0 {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRA)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (MUL (MOVWconst [1]) x)
+}
+func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SBC (MOVWconst [c]) x flags)
        // cond:
-       // result: x
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_0.AuxInt != 1 {
-                       break
-               }
-               x := v.Args[1]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (MUL (MOVWconst [c]) x)
-       // cond: isPowerOfTwo(c)
-       // result: (SLLconst [log2(c)] x)
+       // result: (RSCconst [c] x flags)
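+       // SBC subtracts arg1 (and the borrow) from arg0; when the first operand
+       // is the constant, flipping to reverse-subtract-with-carry lets c become
+       // an immediate while computing the same c - x - borrow.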
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -5255,1214 +13633,1420 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               if !(isPowerOfTwo(c)) {
-                       break
-               }
-               v.reset(OpARMSLLconst)
-               v.AuxInt = log2(c)
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
                v.AddArg(x)
+               v.AddArg(flags)
                return true
        }
-       // match: (MUL (MOVWconst [c]) (MOVWconst [d]))
-       // cond:
-       // result: (MOVWconst [int64(int32(c*d))])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               d := v_1.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int32(c * d))
-               return true
-       }
-       return false
-}
-func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MULA x (MOVWconst [-1]) a)
+       // match: (SBC x (MOVWconst [c]) flags)
        // cond:
-       // result: (SUB a x)
+       // result: (SBCconst [c] x flags)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
-               if v_1.AuxInt != -1 {
-                       break
-               }
-               a := v.Args[2]
-               v.reset(OpARMSUB)
-               v.AddArg(a)
+               c := v_1.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
                v.AddArg(x)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA _ (MOVWconst [0]) a)
+       // match: (SBC x (SLLconst [c] y) flags)
        // cond:
-       // result: a
+       // result: (SBCshiftLL x y [c] flags)
        for {
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_1.AuxInt != 0 {
+               if v_1.Op != OpARMSLLconst {
                        break
                }
-               a := v.Args[2]
-               v.reset(OpCopy)
-               v.Type = a.Type
-               v.AddArg(a)
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               flags := v.Args[2]
+               v.reset(OpARMSBCshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA x (MOVWconst [1]) a)
+       // match: (SBC (SLLconst [c] y) x flags)
        // cond:
-       // result: (ADD x a)
+       // result: (RSCshiftLL x y [c] flags)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_1.AuxInt != 1 {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
                        break
                }
-               a := v.Args[2]
-               v.reset(OpARMADD)
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMRSCshiftLL)
                v.AddArg(x)
-               v.AddArg(a)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA x (MOVWconst [c]) a)
-       // cond: isPowerOfTwo(c)
-       // result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
+       // match: (SBC x (SRLconst [c] y) flags)
+       // cond:
+       // result: (SBCshiftRL x y [c] flags)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMSRLconst {
                        break
                }
                c := v_1.AuxInt
-               a := v.Args[2]
-               if !(isPowerOfTwo(c)) {
-                       break
-               }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AuxInt = log2(c)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v.AddArg(a)
+               y := v_1.Args[0]
+               flags := v.Args[2]
+               v.reset(OpARMSBCshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [-1]) x a)
+       // match: (SBC (SRLconst [c] y) x flags)
        // cond:
-       // result: (SUB a x)
+       // result: (RSCshiftRL x y [c] flags)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_0.AuxInt != -1 {
+               if v_0.Op != OpARMSRLconst {
                        break
                }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
                x := v.Args[1]
-               a := v.Args[2]
-               v.reset(OpARMSUB)
-               v.AddArg(a)
+               flags := v.Args[2]
+               v.reset(OpARMRSCshiftRL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [0]) _ a)
+       // match: (SBC x (SRAconst [c] y) flags)
        // cond:
-       // result: a
+       // result: (SBCshiftRA x y [c] flags)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_0.AuxInt != 0 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
                        break
                }
-               a := v.Args[2]
-               v.reset(OpCopy)
-               v.Type = a.Type
-               v.AddArg(a)
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               flags := v.Args[2]
+               v.reset(OpARMSBCshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [1]) x a)
+       // match: (SBC (SRAconst [c] y) x flags)
        // cond:
-       // result: (ADD x a)
+       // result: (RSCshiftRA x y [c] flags)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_0.AuxInt != 1 {
+               if v_0.Op != OpARMSRAconst {
                        break
                }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
                x := v.Args[1]
-               a := v.Args[2]
-               v.reset(OpARMADD)
+               flags := v.Args[2]
+               v.reset(OpARMRSCshiftRA)
                v.AddArg(x)
-               v.AddArg(a)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [c]) x a)
-       // cond: isPowerOfTwo(c)
-       // result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
+       // match: (SBC x (SLL y z) flags)
+       // cond:
+       // result: (SBCshiftLLreg x y z flags)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               a := v.Args[2]
-               if !(isPowerOfTwo(c)) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLL {
                        break
                }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AuxInt = log2(c)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v.AddArg(a)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMSBCshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a)
+       // match: (SBC (SLL y z) x flags)
        // cond:
-       // result: (ADDconst [int64(int32(c*d))] a)
+       // result: (RSCshiftLLreg x y z flags)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSLL {
                        break
                }
-               c := v_0.AuxInt
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMRSCshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (SBC x (SRL y z) flags)
+       // cond:
+       // result: (SBCshiftRLreg x y z flags)
+       for {
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMSRL {
                        break
                }
-               d := v_1.AuxInt
-               a := v.Args[2]
-               v.reset(OpARMADDconst)
-               v.AuxInt = int64(int32(c * d))
-               v.AddArg(a)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMSBCshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MVN (MOVWconst [c]))
+       // match: (SBC (SRL y z) x flags)
        // cond:
-       // result: (MOVWconst [^c])
+       // result: (RSCshiftRLreg x y z flags)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSRL {
                        break
                }
-               c := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = ^c
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMRSCshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod16 x y)
+       // match: (SBC x (SRA y z) flags)
        // cond:
-       // result: (MOD (SignExt16to32 x) (SignExt16to32 y))
+       // result: (SBCshiftRAreg x y z flags)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMOD)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMSBCshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-}
-func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod16u x y)
+       // match: (SBC (SRA y z) x flags)
        // cond:
-       // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y))
+       // result: (RSCshiftRAreg x y z flags)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMODU)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRA {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMRSCshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
+       return false
 }
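
The SBC rules above fold a shifted second operand straight into the instruction (the SBCshiftLL/RL/RA and *reg forms), switch to the corresponding RSC forms when the shifted value is the left operand, and thread the flags argument through unchanged. SBC consumes that carry input because it is the upper half of a multi-word subtraction; a Go sketch of that pairing (orientation only, assuming the usual SUBS/SBC sequence rather than quoting code from this CL):

    // sub64 sketches why SBC takes a flags argument: the low-word SUBS sets
    // the carry (no-borrow) flag and the high-word SBC subtracts the borrow.
    func sub64(xlo, xhi, ylo, yhi uint32) (lo, hi uint32) {
            lo = xlo - ylo
            borrow := uint32(0)
            if xlo < ylo { // carry clear after SUBS: a borrow occurred
                    borrow = 1
            }
            hi = xhi - yhi - borrow // SBC
            return
    }
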
-func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod32 x y)
+       // match: (SBCconst [c] (ADDconst [d] x) flags)
        // cond:
-       // result: (MOD x y)
+       // result: (SBCconst [int64(int32(c-d))] x flags)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMOD)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               flags := v.Args[1]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = int64(int32(c - d))
                v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(flags)
                return true
        }
-}
-func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod32u x y)
+       // match: (SBCconst [c] (SUBconst [d] x) flags)
        // cond:
-       // result: (MODU x y)
+       // result: (SBCconst [int64(int32(c+d))] x flags)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMODU)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSUBconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               flags := v.Args[1]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = int64(int32(c + d))
                v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(flags)
                return true
        }
+       return false
 }
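
SBCconst absorbs an ADDconst or SUBconst feeding its register operand by adjusting its own constant. The fold is performed in 32-bit arithmetic and then widened again, since AuxInt is an int64 holding a 32-bit value here. Stated as Go (illustration only):

    // foldSBCconst mirrors the two AuxInt updates above: wrap through int32,
    // then store the result back as an int64.
    func foldSBCconst(c, d int64) (afterAdd, afterSub int64) {
            afterAdd = int64(int32(c - d)) // (SBCconst [c] (ADDconst [d] x) flags)
            afterSub = int64(int32(c + d)) // (SBCconst [c] (SUBconst [d] x) flags)
            return
    }
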
-func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod8 x y)
+       // match: (SBCshiftLL (MOVWconst [c]) x [d] flags)
        // cond:
-       // result: (MOD (SignExt8to32 x) (SignExt8to32 y))
+       // result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMOD)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
                v0.AddArg(x)
+               v0.AuxInt = d
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v.AddArg(flags)
                return true
        }
-}
-func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod8u x y)
+       // match: (SBCshiftLL x (MOVWconst [c]) [d] flags)
        // cond:
-       // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y))
+       // result: (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMODU)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(flags)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpMove(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Move [s] _ _ mem)
-       // cond: SizeAndAlign(s).Size() == 0
-       // result: mem
+       // match: (SBCshiftLLreg (MOVWconst [c]) x y flags)
+       // cond:
+       // result: (RSCconst [c] (SLL <x.Type> x y) flags)
        for {
-               s := v.AuxInt
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 0) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = mem.Type
-               v.AddArg(mem)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 1
-       // result: (MOVBstore dst (MOVBUload src mem) mem)
+       // match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
+       // cond:
+       // result: (SBCshiftLL x y [c] flags)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 1) {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVBstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMSBCshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore dst (MOVHUload src mem) mem)
+       return false
+}
+func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SBCshiftRA (MOVWconst [c]) x [d] flags)
+       // cond:
+       // result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVHstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
-               v0.AddArg(src)
-               v0.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
                v.AddArg(v0)
-               v.AddArg(mem)
+               v.AddArg(flags)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 2
-       // result: (MOVBstore [1] dst (MOVBUload [1] src mem)           (MOVBstore dst (MOVBUload src mem) mem))
+       // match: (SBCshiftRA x (MOVWconst [c]) [d] flags)
+       // cond:
+       // result: (SBCconst x [int64(int32(c)>>uint64(d))] flags)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 2) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = 1
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v0.AuxInt = 1
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               c := v_1.AuxInt
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AddArg(x)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(flags)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-       // result: (MOVWstore dst (MOVWload src mem) mem)
+       return false
+}
+func rewriteValueARM_OpARMSBCshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SBCshiftRAreg (MOVWconst [c]) x y flags)
+       // cond:
+       // result: (RSCconst [c] (SRA <x.Type> x y) flags)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32())
-               v0.AddArg(src)
-               v0.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AddArg(mem)
+               v.AddArg(flags)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore [2] dst (MOVHUload [2] src mem)           (MOVHstore dst (MOVHUload src mem) mem))
+       // match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
+       // cond:
+       // result: (SBCshiftRA x y [c] flags)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVHstore)
-               v.AuxInt = 2
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
-               v0.AuxInt = 2
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMSBCshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4
-       // result: (MOVBstore [3] dst (MOVBUload [3] src mem)           (MOVBstore [2] dst (MOVBUload [2] src mem)                      (MOVBstore [1] dst (MOVBUload [1] src mem)                              (MOVBstore dst (MOVBUload src mem) mem))))
+       return false
+}
+func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SBCshiftRL (MOVWconst [c]) x [d] flags)
+       // cond:
+       // result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 4) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = 3
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v0.AuxInt = 3
-               v0.AddArg(src)
-               v0.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-               v1.AuxInt = 2
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v2.AuxInt = 2
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-               v3.AuxInt = 1
-               v3.AddArg(dst)
-               v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v4.AuxInt = 1
-               v4.AddArg(src)
-               v4.AddArg(mem)
-               v3.AddArg(v4)
-               v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-               v5.AddArg(dst)
-               v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v6.AddArg(src)
-               v6.AddArg(mem)
-               v5.AddArg(v6)
-               v5.AddArg(mem)
-               v3.AddArg(v5)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               v.AddArg(flags)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 3
-       // result: (MOVBstore [2] dst (MOVBUload [2] src mem)           (MOVBstore [1] dst (MOVBUload [1] src mem)                      (MOVBstore dst (MOVBUload src mem) mem)))
+       // match: (SBCshiftRL x (MOVWconst [c]) [d] flags)
+       // cond:
+       // result: (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 3) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = 2
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v0.AuxInt = 2
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-               v1.AuxInt = 1
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v2.AuxInt = 1
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-               v3.AddArg(dst)
-               v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v4.AddArg(src)
-               v4.AddArg(mem)
-               v3.AddArg(v4)
-               v3.AddArg(mem)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               c := v_1.AuxInt
+               d := v.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(flags)
+               return true
+       }
+       return false
+}
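
When the shifted operand of one of these ops is itself a constant, the shift is evaluated at compile time and the whole thing collapses to SBCconst. Note the asymmetry in the folds above: SLL and SRL go through uint32 (logical shifts), SRA through int32 (arithmetic shift). A compact Go restatement (illustration only):

    // foldShiftImm mirrors the three AuxInt computations above.
    func foldShiftImm(c, d int64) (ll, rl, ra int64) {
            ll = int64(uint32(c) << uint64(d)) // SBCshiftLL x (MOVWconst [c]) [d]
            rl = int64(uint32(c) >> uint64(d)) // SBCshiftRL x (MOVWconst [c]) [d]
            ra = int64(int32(c) >> uint64(d))  // SBCshiftRA x (MOVWconst [c]) [d]
            return
    }
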
+func rewriteValueARM_OpARMSBCshiftRLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SBCshiftRLreg (MOVWconst [c]) x y flags)
+       // cond:
+       // result: (RSCconst [c] (SRL <x.Type> x y) flags)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512   && SizeAndAlign(s).Align()%4 == 0
-       // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
+       // match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
+       // cond:
+       // result: (SBCshiftRL x y [c] flags)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMDUFFCOPY)
-               v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4))
-               v.AddArg(dst)
-               v.AddArg(src)
-               v.AddArg(mem)
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMSBCshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512  && SizeAndAlign(s).Align()%4 == 0
-       // result: (LoweredMove dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
+       return false
+}
+func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SLL x (MOVWconst [c]))
+       // cond:
+       // result: (SLLconst x [c&31])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMLoweredMove)
-               v.AddArg(dst)
-               v.AddArg(src)
-               v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
-               v0.AddArg(src)
-               v0.AuxInt = SizeAndAlign(s).Size()
-               v.AddArg(v0)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpARMSLLconst)
+               v.AddArg(x)
+               v.AuxInt = c & 31
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0
-       // result: (LoweredMoveU dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
+       return false
+}
+func rewriteValueARM_OpARMSLLconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SLLconst [c] (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [int64(uint32(d)<<uint64(c))])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0) {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMLoweredMoveU)
-               v.AddArg(dst)
-               v.AddArg(src)
-               v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
-               v0.AddArg(src)
-               v0.AuxInt = SizeAndAlign(s).Size()
-               v.AddArg(v0)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(uint32(d) << uint64(c))
                return true
        }
        return false
 }
-func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul16 x y)
+       // match: (SRA x (MOVWconst [c]))
        // cond:
-       // result: (MUL x y)
+       // result: (SRAconst x [c&31])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMUL)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMSRAconst)
                v.AddArg(x)
-               v.AddArg(y)
+               v.AuxInt = c & 31
                return true
        }
+       return false
 }
-func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul32 x y)
+       // match: (SRAcond x _ (FlagEQ))
        // cond:
-       // result: (MUL x y)
+       // result: (SRAconst x [31])
+       for {
+               x := v.Args[0]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMSRAconst)
+               v.AddArg(x)
+               v.AuxInt = 31
+               return true
+       }
+       // match: (SRAcond x y (FlagLT_ULT))
+       // cond:
+       // result: (SRA x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMMUL)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMSRA)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mul32F x y)
+       // match: (SRAcond x _ (FlagLT_UGT))
        // cond:
-       // result: (MULF x y)
+       // result: (SRAconst x [31])
+       for {
+               x := v.Args[0]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMSRAconst)
+               v.AddArg(x)
+               v.AuxInt = 31
+               return true
+       }
+       // match: (SRAcond x y (FlagGT_ULT))
+       // cond:
+       // result: (SRA x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMMULF)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMSRA)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
+       // match: (SRAcond x _ (FlagGT_UGT))
+       // cond:
+       // result: (SRAconst x [31])
+       for {
+               x := v.Args[0]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMSRAconst)
+               v.AddArg(x)
+               v.AuxInt = 31
+               return true
+       }
+       return false
 }
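
SRAcond selects between two shifts based on an earlier unsigned comparison of the shift count: when the flags say the count was unsigned-less-than the limit (FlagLT_ULT, FlagGT_ULT) it becomes a plain SRA, and in the remaining constant-flag cases (FlagEQ, FlagLT_UGT, FlagGT_UGT) it becomes SRAconst by 31, i.e. pure sign fill. A Go sketch of that selection (illustration only; ult stands for the unsigned-less-than bit recorded in the flags):

    // sraCond models how the constant-flag rules above resolve SRAcond.
    func sraCond(x int32, y uint32, ult bool) int32 {
            if ult {
                    return x >> y // count known in range: plain SRA
            }
            return x >> 31 // count too large: arithmetic shift saturates to the sign
    }
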
-func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSRAconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul32uhilo x y)
+       // match: (SRAconst [c] (MOVWconst [d]))
        // cond:
-       // result: (MULLU x y)
+       // result: (MOVWconst [int64(int32(d)>>uint64(c))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMULLU)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int32(d) >> uint64(c))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul64F x y)
+       // match: (SRL x (MOVWconst [c]))
        // cond:
-       // result: (MULD x y)
+       // result: (SRLconst x [c&31])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMULD)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMSRLconst)
                v.AddArg(x)
-               v.AddArg(y)
+               v.AuxInt = c & 31
                return true
        }
+       return false
 }
-func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSRLconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul8 x y)
+       // match: (SRLconst [c] (MOVWconst [d]))
        // cond:
-       // result: (MUL x y)
+       // result: (MOVWconst [int64(uint32(d)>>uint64(c))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMUL)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(uint32(d) >> uint64(c))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg16 x)
+       // match: (SUB (MOVWconst [c]) x)
        // cond:
-       // result: (RSBconst [0] x)
+       // result: (RSBconst [c] x)
        for {
-               x := v.Args[0]
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
                v.reset(OpARMRSBconst)
-               v.AuxInt = 0
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (SUB x (MOVWconst [c]))
+       // cond:
+       // result: (SUBconst [c] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (SUB x (SLLconst [c] y))
+       // cond:
+       // result: (SUBshiftLL x y [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMSUBshiftLL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neg32 x)
+       // match: (SUB (SLLconst [c] y) x)
        // cond:
-       // result: (RSBconst [0] x)
+       // result: (RSBshiftLL x y [c])
        for {
-               x := v.Args[0]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = 0
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMRSBshiftLL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neg32F x)
+       // match: (SUB x (SRLconst [c] y))
        // cond:
-       // result: (MULF (MOVFconst [int64(math.Float64bits(-1))]) x)
+       // result: (SUBshiftRL x y [c])
        for {
                x := v.Args[0]
-               v.reset(OpARMMULF)
-               v0 := b.NewValue0(v.Line, OpARMMOVFconst, config.fe.TypeFloat32())
-               v0.AuxInt = int64(math.Float64bits(-1))
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMSUBshiftRL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neg64F x)
+       // match: (SUB (SRLconst [c] y) x)
        // cond:
-       // result: (MULD (MOVDconst [int64(math.Float64bits(-1))]) x)
+       // result: (RSBshiftRL x y [c])
        for {
-               x := v.Args[0]
-               v.reset(OpARMMULD)
-               v0 := b.NewValue0(v.Line, OpARMMOVDconst, config.fe.TypeFloat64())
-               v0.AuxInt = int64(math.Float64bits(-1))
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMRSBshiftRL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neg8 x)
+       // match: (SUB x (SRAconst [c] y))
        // cond:
-       // result: (RSBconst [0] x)
+       // result: (SUBshiftRA x y [c])
        for {
                x := v.Args[0]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = 0
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMSUBshiftRA)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq16 x y)
+       // match: (SUB (SRAconst [c] y) x)
        // cond:
-       // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (RSBshiftRA x y [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMRSBshiftRA)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq32 x y)
+       // match: (SUB x (SLL y z))
        // cond:
-       // result: (NotEqual (CMP x y))
+       // result: (SUBshiftLLreg x y z)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMSUBshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq32F x y)
+       // match: (SUB (SLL y z) x)
        // cond:
-       // result: (NotEqual (CMPF x y))
+       // result: (RSBshiftLLreg x y z)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMRSBshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq64F x y)
+       // match: (SUB x (SRL y z))
        // cond:
-       // result: (NotEqual (CMPD x y))
+       // result: (SUBshiftRLreg x y z)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMSUBshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq8 x y)
+       // match: (SUB (SRL y z) x)
        // cond:
-       // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (RSBshiftRLreg x y z)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMRSBshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (NeqB x y)
+       // match: (SUB x (SRA y z))
        // cond:
-       // result: (XOR x y)
+       // result: (SUBshiftRAreg x y z)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMXOR)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMSUBshiftRAreg)
                v.AddArg(x)
                v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (NeqPtr x y)
+       // match: (SUB (SRA y z) x)
        // cond:
-       // result: (NotEqual (CMP x y))
+       // result: (RSBshiftRAreg x y z)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRA {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMRSBshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (NilCheck ptr mem)
+       // match: (SUB x x)
        // cond:
-       // result: (LoweredNilCheck ptr mem)
+       // result: (MOVWconst [0])
        for {
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMLoweredNilCheck)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
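
Because the ARM SUB immediate always subtracts from a register, a constant left operand flips the operation: (SUB (MOVWconst [c]) x) becomes RSBconst, the reverse subtract. Shifted operands fold in the same way as for SBC above (the SUBshift*/RSBshift* and *reg forms), and x-x collapses to the constant 0. A small Go illustration of the reverse-subtract case (illustration only):

    // subConstReg shows the value computed by (RSBconst [c] x): the constant
    // minus the register, which a plain SUBconst cannot express.
    func subConstReg(c, x int32) int32 {
            return c - x
    }
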
-func rewriteValueARM_OpNot(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Not x)
+       // match: (SUBS (MOVWconst [c]) x)
        // cond:
-       // result: (XORconst [1] x)
+       // result: (RSBSconst [c] x)
        for {
-               x := v.Args[0]
-               v.reset(OpARMXORconst)
-               v.AuxInt = 1
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = c
                v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (NotEqual (FlagEQ))
+       // match: (SUBS x (MOVWconst [c]))
        // cond:
-       // result: (MOVWconst [0])
+       // result: (SUBSconst [c] x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_1.AuxInt
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (NotEqual (FlagLT_ULT))
+       // match: (SUBS x (SLLconst [c] y))
        // cond:
-       // result: (MOVWconst [1])
+       // result: (SUBSshiftLL x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMSUBSshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (NotEqual (FlagLT_UGT))
+       // match: (SUBS (SLLconst [c] y) x)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (RSBSshiftLL x y [c])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               if v_0.Op != OpARMSLLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMRSBSshiftLL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (NotEqual (FlagGT_ULT))
+       // match: (SUBS x (SRLconst [c] y))
        // cond:
-       // result: (MOVWconst [1])
+       // result: (SUBSshiftRL x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMSUBSshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (NotEqual (FlagGT_UGT))
+       // match: (SUBS (SRLconst [c] y) x)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (RSBSshiftRL x y [c])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               if v_0.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMRSBSshiftRL)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (NotEqual (InvertFlags x))
+       // match: (SUBS x (SRAconst [c] y))
        // cond:
-       // result: (NotEqual x)
+       // result: (SUBSshiftRA x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMNotEqual)
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMSUBSshiftRA)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (OR (MOVWconst [c]) x)
+       // match: (SUBS (SRAconst [c] y) x)
        // cond:
-       // result: (ORconst [c] x)
+       // result: (RSBSshiftRA x y [c])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSRAconst {
                        break
                }
                c := v_0.AuxInt
+               y := v_0.Args[0]
                x := v.Args[1]
-               v.reset(OpARMORconst)
-               v.AuxInt = c
+               v.reset(OpARMRSBSshiftRA)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (OR x (MOVWconst [c]))
+       // match: (SUBS x (SLL y z))
        // cond:
-       // result: (ORconst [c] x)
+       // result: (SUBSshiftLLreg x y z)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMSLL {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpARMORconst)
-               v.AuxInt = c
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMSUBSshiftLLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (OR x x)
+       // match: (SUBS (SLL y z) x)
        // cond:
-       // result: x
+       // result: (RSBSshiftLLreg x y z)
        for {
-               x := v.Args[0]
-               if x != v.Args[1] {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLL {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMRSBSshiftLLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ORconst [0] x)
+       // match: (SUBS x (SRL y z))
        // cond:
-       // result: x
+       // result: (SUBSshiftRLreg x y z)
        for {
-               if v.AuxInt != 0 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMSUBSshiftRLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (ORconst [c] _)
-       // cond: int32(c)==-1
-       // result: (MOVWconst [-1])
+       // match: (SUBS (SRL y z) x)
+       // cond:
+       // result: (RSBSshiftRLreg x y z)
        for {
-               c := v.AuxInt
-               if !(int32(c) == -1) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = -1
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMRSBSshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (ORconst [c] (MOVWconst [d]))
+       // match: (SUBS x (SRA y z))
        // cond:
-       // result: (MOVWconst [c|d])
+       // result: (SUBSshiftRAreg x y z)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
                        break
                }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = c | d
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMSUBSshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (ORconst [c] (ORconst [d] x))
+       // match: (SUBS (SRA y z) x)
        // cond:
-       // result: (ORconst [c|d] x)
+       // result: (RSBSshiftRAreg x y z)
        for {
-               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMORconst {
+               if v_0.Op != OpARMSRA {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMORconst)
-               v.AuxInt = c | d
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMRSBSshiftRAreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
        return false
 }
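
The SUBS rules above fold a shift feeding either operand of the flag-setting subtract into a single shifted-operand op: `x - (y<<c)` becomes SUBSshiftLL, while `(y<<c) - x` has to use the reverse-subtract form RSBSshiftLL because only the second operand can carry a shift. A minimal Go sketch of the identities these rewrites rely on (helper names are illustrative, not part of the CL):

    package main

    import "fmt"

    // subsShiftLL models SUBSshiftLL x y [c]: x minus (y shifted left by c).
    func subsShiftLL(x, y uint32, c uint) uint32 { return x - y<<c }

    // rsbsShiftLL models RSBSshiftLL x y [c]: the reverse form, (y<<c) minus x,
    // used when the shifted value was the left operand of the original SUBS.
    func rsbsShiftLL(x, y uint32, c uint) uint32 { return y<<c - x }

    func main() {
            x, y, c := uint32(100), uint32(7), uint(3)
            fmt.Println(subsShiftLL(x, y, c) == x-(y<<c)) // true
            fmt.Println(rsbsShiftLL(x, y, c) == (y<<c)-x) // true
    }
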
-func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (OffPtr [off] ptr:(SP))
+       // match: (SUBSshiftLL (MOVWconst [c]) x [d])
        // cond:
-       // result: (MOVWaddr [off] ptr)
+       // result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
        for {
-               off := v.AuxInt
-               ptr := v.Args[0]
-               if ptr.Op != OpSP {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWaddr)
-               v.AuxInt = off
-               v.AddArg(ptr)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-       // match: (OffPtr [off] ptr)
+       // match: (SUBSshiftLL x (MOVWconst [c]) [d])
        // cond:
-       // result: (ADDconst [off] ptr)
+       // result: (SUBSconst x [int64(uint32(c)<<uint64(d))])
        for {
-               off := v.AuxInt
-               ptr := v.Args[0]
-               v.reset(OpARMADDconst)
-               v.AuxInt = off
-               v.AddArg(ptr)
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMSUBSconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
                return true
        }
+       return false
 }
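
When the shifted operand is itself a constant, the rule above evaluates the shift at compile time and folds it into the immediate; the `int64(uint32(c) << uint64(d))` expression keeps only the low 32 bits, matching what a 32-bit shift would produce. A small stand-alone Go check of that truncation (illustrative only, not compiler code):

    package main

    import "fmt"

    // foldLL mimics the AuxInt computation int64(uint32(c) << uint64(d)):
    // shift in 32-bit arithmetic, then widen, so bits above bit 31 are dropped.
    func foldLL(c, d int64) int64 { return int64(uint32(c) << uint64(d)) }

    func main() {
            fmt.Printf("%#x\n", foldLL(0x40000001, 2)) // 0x4: the overflowed bit is gone
            fmt.Printf("%#x\n", foldLL(3, 4))          // 0x30
    }
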
-func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or16 x y)
+       // match: (SUBSshiftLLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (OR x y)
+       // result: (RSBSconst [c] (SLL <x.Type> x y))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMOR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Or32 x y)
+       // match: (SUBSshiftLLreg x y (MOVWconst [c]))
        // cond:
-       // result: (OR x y)
+       // result: (SUBSshiftLL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMOR)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMSUBSshiftLL)
                v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or8 x y)
+       // match: (SUBSshiftRA (MOVWconst [c]) x [d])
        // cond:
-       // result: (OR x y)
+       // result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SUBSshiftRA x (MOVWconst [c]) [d])
+       // cond:
+       // result: (SUBSconst x [int64(int32(c)>>uint64(d))])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMOR)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMSUBSconst)
                v.AddArg(x)
-               v.AddArg(y)
+               v.AuxInt = int64(int32(c) >> uint64(d))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (OrB x y)
+       // match: (SUBSshiftRAreg (MOVWconst [c]) x y)
        // cond:
-       // result: (OR x y)
+       // result: (RSBSconst [c] (SRA <x.Type> x y))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SUBSshiftRAreg x y (MOVWconst [c]))
+       // cond:
+       // result: (SUBSshiftRA x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMOR)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMSUBSshiftRA)
                v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSB (MOVWconst [c]) x)
+       // match: (SUBSshiftRL (MOVWconst [c]) x [d])
        // cond:
-       // result: (SUBconst [c] x)
+       // result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -6470,14 +15054,18 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               v.reset(OpARMSUBconst)
+               d := v.AuxInt
+               v.reset(OpARMRSBSconst)
                v.AuxInt = c
-               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-       // match: (RSB x (MOVWconst [c]))
+       // match: (SUBSshiftRL x (MOVWconst [c]) [d])
        // cond:
-       // result: (RSBconst [c] x)
+       // result: (SUBSconst x [int64(uint32(c)>>uint64(d))])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -6485,65 +15073,88 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                        break
                }
                c := v_1.AuxInt
-               v.reset(OpARMRSBconst)
-               v.AuxInt = c
+               d := v.AuxInt
+               v.reset(OpARMSUBSconst)
                v.AddArg(x)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBconst [c] (MOVWconst [d]))
+       // match: (SUBSshiftRLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (MOVWconst [int64(int32(c-d))])
+       // result: (RSBSconst [c] (SRL <x.Type> x y))
        for {
-               c := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int32(c - d))
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (RSBconst [c] (RSBconst [d] x))
+       // match: (SUBSshiftRLreg x y (MOVWconst [c]))
        // cond:
-       // result: (ADDconst [int64(int32(c-d))] x)
+       // result: (SUBSshiftRL x y [c])
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMRSBconst {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMADDconst)
-               v.AuxInt = int64(int32(c - d))
+               c := v_2.AuxInt
+               v.reset(OpARMSUBSshiftRL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (RSBconst [c] (ADDconst [d] x))
+       return false
+}
+func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBconst [0] x)
        // cond:
-       // result: (RSBconst [int64(int32(c-d))] x)
+       // result: x
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (SUBconst [c] (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [int64(int32(d-c))])
        for {
                c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
                d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = int64(int32(c - d))
-               v.AddArg(x)
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int32(d - c))
                return true
        }
-       // match: (RSBconst [c] (SUBconst [d] x))
+       // match: (SUBconst [c] (SUBconst [d] x))
        // cond:
-       // result: (RSBconst [int64(int32(c+d))] x)
+       // result: (ADDconst [int64(int32(-c-d))] x)
        for {
                c := v.AuxInt
                v_0 := v.Args[0]
@@ -6552,19 +15163,14 @@ func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
                }
                d := v_0.AuxInt
                x := v_0.Args[0]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = int64(int32(c + d))
+               v.reset(OpARMADDconst)
+               v.AuxInt = int64(int32(-c - d))
                v.AddArg(x)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (RSCconst [c] (ADDconst [d] x) flags)
+       // match: (SUBconst [c] (ADDconst [d] x))
        // cond:
-       // result: (RSCconst [int64(int32(c-d))] x flags)
+       // result: (ADDconst [int64(int32(-c+d))] x)
        for {
                c := v.AuxInt
                v_0 := v.Args[0]
@@ -6573,1359 +15179,1229 @@ func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
                }
                d := v_0.AuxInt
                x := v_0.Args[0]
-               flags := v.Args[1]
-               v.reset(OpARMRSCconst)
-               v.AuxInt = int64(int32(c - d))
+               v.reset(OpARMADDconst)
+               v.AuxInt = int64(int32(-c + d))
                v.AddArg(x)
-               v.AddArg(flags)
                return true
        }
-       // match: (RSCconst [c] (SUBconst [d] x) flags)
+       // match: (SUBconst [c] (RSBconst [d] x))
        // cond:
-       // result: (RSCconst [int64(int32(c+d))] x flags)
+       // result: (RSBconst [int64(int32(-c+d))] x)
        for {
                c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSUBconst {
+               if v_0.Op != OpARMRSBconst {
                        break
                }
                d := v_0.AuxInt
                x := v_0.Args[0]
-               flags := v.Args[1]
-               v.reset(OpARMRSCconst)
-               v.AuxInt = int64(int32(c + d))
+               v.reset(OpARMRSBconst)
+               v.AuxInt = int64(int32(-c + d))
                v.AddArg(x)
-               v.AddArg(flags)
                return true
        }
        return false
 }
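
The SUBconst rules combine chains of constant adds and subtracts into a single immediate, always reducing through int32 so the folded constant matches 32-bit wraparound. A quick Go sketch of the identity behind `(SUBconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(-c-d))] x)` (illustrative, not part of the CL):

    package main

    import "fmt"

    func main() {
            x, c, d := int32(7), int32(1<<30), int32(1<<30+5)
            lhs := (x - d) - c // two SUBconst steps
            folded := -c - d   // the generated code computes this as int64(int32(-c-d))
            rhs := x + folded  // one ADDconst with the folded immediate
            fmt.Println(lhs == rhs) // true, including when the intermediate sums wrap
    }
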
-func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh16Ux16 x y)
-       // cond:
-       // result: (SRL (ZeroExt16to32 x) (ZeroExt16to32 y))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRL)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
-               return true
-       }
-}
-func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux32 x y)
+       // match: (SUBshiftLL (MOVWconst [c]) x [d])
        // cond:
-       // result: (SRL (ZeroExt16to32 x) y)
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRL)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh16Ux64 x (Const64 [c]))
-       // cond: uint64(c) < 16
-       // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 16) {
-                       break
-               }
-               v.reset(OpARMSRLconst)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v0.AuxInt = 16
-               v.AddArg(v0)
-               v.AuxInt = c + 16
-               return true
-       }
-       // match: (Rsh16Ux64 _ (Const64 [c]))
-       // cond: uint64(c) >= 16
-       // result: (Const16 [0])
+       // result: (RSBconst [c] (SLLconst <x.Type> x [d]))
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpConst16)
-               v.AuxInt = 0
-               return true
-       }
-       return false
-}
-func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh16Ux8  x y)
-       // cond:
-       // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRL)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
-               return true
-       }
-}
-func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh16x16 x y)
-       // cond:
-       // result: (SRA (SignExt16to32 x) (ZeroExt16to32 y))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRA)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMRSBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
                v0.AddArg(x)
+               v0.AuxInt = d
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
                return true
        }
-}
-func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh16x32 x y)
+       // match: (SUBshiftLL x (MOVWconst [c]) [d])
        // cond:
-       // result: (SRA (SignExt16to32 x) y)
+       // result: (SUBconst x [int64(uint32(c)<<uint64(d))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRA)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh16x64 x (Const64 [c]))
-       // cond: uint64(c) < 16
-       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 16) {
-                       break
-               }
-               v.reset(OpARMSRAconst)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v0.AuxInt = 16
-               v.AddArg(v0)
-               v.AuxInt = c + 16
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMSUBconst)
+               v.AddArg(x)
+               v.AuxInt = int64(uint32(c) << uint64(d))
                return true
        }
-       // match: (Rsh16x64 x (Const64 [c]))
-       // cond: uint64(c) >= 16
-       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
+       // match: (SUBshiftLL x (SLLconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               if x != v_1.Args[0] {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
+               d := v.AuxInt
+               if !(c == d) {
                        break
                }
-               v.reset(OpARMSRAconst)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v0.AuxInt = 16
-               v.AddArg(v0)
-               v.AuxInt = 31
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x8  x y)
+       // match: (SUBshiftLLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+       // result: (RSBconst [c] (SLL <x.Type> x y))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRA)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
                v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
                return true
        }
-}
-func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh32Ux16 x y)
+       // match: (SUBshiftLLreg x y (MOVWconst [c]))
        // cond:
-       // result: (SRL x (ZeroExt16to32 y))
+       // result: (SUBshiftLL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRL)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMSUBshiftLL)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux32 x y)
+       // match: (SUBshiftRA (MOVWconst [c]) x [d])
        // cond:
-       // result: (SRL x y)
+       // result: (RSBconst [c] (SRAconst <x.Type> x [d]))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRL)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMRSBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh32Ux64 x (Const64 [c]))
-       // cond: uint64(c) < 32
-       // result: (SRLconst x [c])
+       // match: (SUBshiftRA x (MOVWconst [c]) [d])
+       // cond:
+       // result: (SUBconst x [int64(int32(c)>>uint64(d))])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) < 32) {
-                       break
-               }
-               v.reset(OpARMSRLconst)
+               d := v.AuxInt
+               v.reset(OpARMSUBconst)
                v.AddArg(x)
-               v.AuxInt = c
+               v.AuxInt = int64(int32(c) >> uint64(d))
                return true
        }
-       // match: (Rsh32Ux64 _ (Const64 [c]))
-       // cond: uint64(c) >= 32
-       // result: (Const32 [0])
+       // match: (SUBshiftRA x (SRAconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
        for {
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               if x != v_1.Args[0] {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
+               d := v.AuxInt
+               if !(c == d) {
                        break
                }
-               v.reset(OpConst32)
+               v.reset(OpARMMOVWconst)
                v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftRAreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux8  x y)
+       // match: (SUBshiftRAreg (MOVWconst [c]) x y)
        // cond:
-       // result: (SRL x (ZeroExt8to32 y))
+       // result: (RSBconst [c] (SRA <x.Type> x y))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRL)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh32x16 x y)
+       // match: (SUBshiftRAreg x y (MOVWconst [c]))
        // cond:
-       // result: (SRA x (ZeroExt16to32 y))
+       // result: (SUBshiftRA x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRA)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMSUBshiftRA)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x32 x y)
+       // match: (SUBshiftRL (MOVWconst [c]) x [d])
        // cond:
-       // result: (SRA x y)
+       // result: (RSBconst [c] (SRLconst <x.Type> x [d]))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRA)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMRSBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh32x64 x (Const64 [c]))
-       // cond: uint64(c) < 32
-       // result: (SRAconst x [c])
+       // match: (SUBshiftRL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (SUBconst x [int64(uint32(c)>>uint64(d))])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) < 32) {
-                       break
-               }
-               v.reset(OpARMSRAconst)
+               d := v.AuxInt
+               v.reset(OpARMSUBconst)
                v.AddArg(x)
-               v.AuxInt = c
+               v.AuxInt = int64(uint32(c) >> uint64(d))
                return true
        }
-       // match: (Rsh32x64 x (Const64 [c]))
-       // cond: uint64(c) >= 32
-       // result: (SRAconst x [31])
+       // match: (SUBshiftRL x (SRLconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               if x != v_1.Args[0] {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
+               d := v.AuxInt
+               if !(c == d) {
                        break
                }
-               v.reset(OpARMSRAconst)
-               v.AddArg(x)
-               v.AuxInt = 31
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh32x8  x y)
-       // cond:
-       // result: (SRA x (ZeroExt8to32 y))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRA)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(y)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux16 x y)
+       // match: (SUBshiftRLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (SRL (ZeroExt8to32 x) (ZeroExt16to32 y))
+       // result: (RSBconst [c] (SRL <x.Type> x y))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRL)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
                v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
                return true
        }
-}
-func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh8Ux32 x y)
+       // match: (SUBshiftRLreg x y (MOVWconst [c]))
        // cond:
-       // result: (SRL (ZeroExt8to32 x) y)
+       // result: (SUBshiftRL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRL)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMSUBshiftRL)
+               v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValueARM_OpSelect0(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux64 x (Const64 [c]))
-       // cond: uint64(c) < 8
-       // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+       // match: (Select0 <t> x)
+       // cond: t.IsFlags()
+       // result: (Carry x)
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 8) {
+               if !(t.IsFlags()) {
                        break
                }
-               v.reset(OpARMSRLconst)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v0.AuxInt = 24
-               v.AddArg(v0)
-               v.AuxInt = c + 24
+               v.reset(OpARMCarry)
+               v.AddArg(x)
                return true
        }
-       // match: (Rsh8Ux64 _ (Const64 [c]))
-       // cond: uint64(c) >= 8
-       // result: (Const8 [0])
+       // match: (Select0 <t> x)
+       // cond: !t.IsFlags()
+       // result: (LoweredSelect0 x)
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
+               t := v.Type
+               x := v.Args[0]
+               if !(!t.IsFlags()) {
                        break
                }
-               v.reset(OpConst8)
-               v.AuxInt = 0
+               v.reset(OpARMLoweredSelect0)
+               v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValueARM_OpSelect1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux8  x y)
+       // match: (Select1 x)
        // cond:
-       // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+       // result: (LoweredSelect1 x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRL)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v.reset(OpARMLoweredSelect1)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x16 x y)
+       // match: (SignExt16to32 x)
        // cond:
-       // result: (SRA (SignExt8to32 x) (ZeroExt16to32 y))
+       // result: (MOVHreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRA)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v.reset(OpARMMOVHreg)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x32 x y)
+       // match: (SignExt8to16 x)
        // cond:
-       // result: (SRA (SignExt8to32 x) y)
+       // result: (MOVBreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRA)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v.AddArg(y)
+               v.reset(OpARMMOVBreg)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x64 x (Const64 [c]))
-       // cond: uint64(c) < 8
-       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+       // match: (SignExt8to32 x)
+       // cond:
+       // result: (MOVBreg x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 8) {
-                       break
-               }
-               v.reset(OpARMSRAconst)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v0.AuxInt = 24
-               v.AddArg(v0)
-               v.AuxInt = c + 24
+               v.reset(OpARMMOVBreg)
+               v.AddArg(x)
                return true
        }
-       // match: (Rsh8x64 x (Const64 [c]))
-       // cond: uint64(c) >= 8
-       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
+}
+func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Signmask x)
+       // cond:
+       // result: (SRAconst x [31])
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
-                       break
-               }
                v.reset(OpARMSRAconst)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v0.AuxInt = 24
-               v.AddArg(v0)
+               v.AddArg(x)
                v.AuxInt = 31
                return true
        }
-       return false
 }
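
Signmask lowers to a single arithmetic right shift by 31: the sign bit is replicated across the word, giving 0 for non-negative inputs and -1 (all ones) for negative ones. A one-line Go check of that identity (illustrative only):

    package main

    import "fmt"

    // signmask mirrors (SRAconst x [31]): 0 if x >= 0, -1 if x < 0.
    func signmask(x int32) int32 { return x >> 31 }

    func main() {
            fmt.Println(signmask(42), signmask(-42)) // 0 -1
    }
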
-func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x8  x y)
+       // match: (Sqrt x)
        // cond:
-       // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+       // result: (SQRTD x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRA)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v.reset(OpARMSQRTD)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
+func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SBC (MOVWconst [c]) x flags)
+       // match: (StaticCall [argwid] {target} mem)
        // cond:
-       // result: (RSCconst [c] x flags)
+       // result: (CALLstatic [argwid] {target} mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               argwid := v.AuxInt
+               target := v.Aux
+               mem := v.Args[0]
+               v.reset(OpARMCALLstatic)
+               v.AuxInt = argwid
+               v.Aux = target
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueARM_OpStore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Store [1] ptr val mem)
+       // cond:
+       // result: (MOVBstore ptr val mem)
+       for {
+               if v.AuxInt != 1 {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMRSCconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               v.AddArg(flags)
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVBstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (SBC x (MOVWconst [c]) flags)
+       // match: (Store [2] ptr val mem)
        // cond:
-       // result: (SBCconst [c] x flags)
+       // result: (MOVHstore ptr val mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v.AuxInt != 2 {
                        break
                }
-               c := v_1.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMSBCconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               v.AddArg(flags)
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVHstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SBCconst [c] (ADDconst [d] x) flags)
-       // cond:
-       // result: (SBCconst [int64(int32(c-d))] x flags)
+       // match: (Store [4] ptr val mem)
+       // cond: !is32BitFloat(val.Type)
+       // result: (MOVWstore ptr val mem)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               if v.AuxInt != 4 {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               flags := v.Args[1]
-               v.reset(OpARMSBCconst)
-               v.AuxInt = int64(int32(c - d))
-               v.AddArg(x)
-               v.AddArg(flags)
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(!is32BitFloat(val.Type)) {
+                       break
+               }
+               v.reset(OpARMMOVWstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (SBCconst [c] (SUBconst [d] x) flags)
-       // cond:
-       // result: (SBCconst [int64(int32(c+d))] x flags)
+       // match: (Store [4] ptr val mem)
+       // cond: is32BitFloat(val.Type)
+       // result: (MOVFstore ptr val mem)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSUBconst {
+               if v.AuxInt != 4 {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               flags := v.Args[1]
-               v.reset(OpARMSBCconst)
-               v.AuxInt = int64(int32(c + d))
-               v.AddArg(x)
-               v.AddArg(flags)
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32BitFloat(val.Type)) {
+                       break
+               }
+               v.reset(OpARMMOVFstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Store [8] ptr val mem)
+       // cond: is64BitFloat(val.Type)
+       // result: (MOVDstore ptr val mem)
+       for {
+               if v.AuxInt != 8 {
+                       break
+               }
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is64BitFloat(val.Type)) {
+                       break
+               }
+               v.reset(OpARMMOVDstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
        return false
 }
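
The Store lowering dispatches purely on the store width plus whether the value is a float, exactly as the five rules above encode. A compact Go sketch of that mapping (function and names are illustrative, not compiler API):

    package main

    import "fmt"

    // storeOp mirrors the dispatch in the Store rules above; the 8-byte case
    // is guarded by is64BitFloat in the rule, so only float64 reaches it here.
    func storeOp(width int64, isFloat bool) string {
            switch {
            case width == 1:
                    return "MOVBstore"
            case width == 2:
                    return "MOVHstore"
            case width == 4 && !isFloat:
                    return "MOVWstore"
            case width == 4 && isFloat:
                    return "MOVFstore"
            case width == 8 && isFloat:
                    return "MOVDstore"
            }
            return "unhandled"
    }

    func main() {
            fmt.Println(storeOp(4, false), storeOp(4, true), storeOp(8, true))
    }
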
-func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SLL x (MOVWconst [c]))
+       // match: (Sub16 x y)
        // cond:
-       // result: (SLLconst x [c&31])
+       // result: (SUB x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpARMSLLconst)
+               y := v.Args[1]
+               v.reset(OpARMSUB)
                v.AddArg(x)
-               v.AuxInt = c & 31
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSLLconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SLLconst [c] (MOVWconst [d]))
+       // match: (Sub32 x y)
        // cond:
-       // result: (MOVWconst [int64(uint32(d)<<uint64(c))])
+       // result: (SUB x y)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(uint32(d) << uint64(c))
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSUB)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SRA x (MOVWconst [c]))
+       // match: (Sub32F x y)
        // cond:
-       // result: (SRAconst x [c&31])
+       // result: (SUBF x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpARMSRAconst)
+               y := v.Args[1]
+               v.reset(OpARMSUBF)
                v.AddArg(x)
-               v.AuxInt = c & 31
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSRAconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SRAconst [c] (MOVWconst [d]))
+       // match: (Sub32carry x y)
        // cond:
-       // result: (MOVWconst [int64(int32(d)>>uint64(c))])
+       // result: (SUBS x y)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int32(d) >> uint64(c))
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSUBS)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SRL x (MOVWconst [c]))
+       // match: (Sub32withcarry x y c)
        // cond:
-       // result: (SRLconst x [c&31])
+       // result: (SBC x y c)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpARMSRLconst)
+               y := v.Args[1]
+               c := v.Args[2]
+               v.reset(OpARMSBC)
                v.AddArg(x)
-               v.AuxInt = c & 31
+               v.AddArg(y)
+               v.AddArg(c)
                return true
        }
-       return false
 }
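
Sub32carry and Sub32withcarry map onto ARM's SUBS/SBC pair, which is how 64-bit subtraction is strung together on a 32-bit target: the low words go through SUBS to produce a borrow, and the high words consume it through SBC. A minimal Go model of that decomposition using math/bits (illustrative, not the compiler's code):

    package main

    import (
            "fmt"
            "math/bits"
    )

    // sub64 subtracts two 64-bit values using only 32-bit halves,
    // mirroring the SUBS (low word) / SBC (high word) pairing.
    func sub64(x, y uint64) uint64 {
            lo, borrow := bits.Sub32(uint32(x), uint32(y), 0)         // SUBS
            hi, _ := bits.Sub32(uint32(x>>32), uint32(y>>32), borrow) // SBC
            return uint64(hi)<<32 | uint64(lo)
    }

    func main() {
            x, y := uint64(1)<<40, uint64(3)
            fmt.Println(sub64(x, y) == x-y) // true
    }
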
-func rewriteValueARM_OpARMSRLconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SRLconst [c] (MOVWconst [d]))
+       // match: (Sub64F x y)
        // cond:
-       // result: (MOVWconst [int64(uint32(d)>>uint64(c))])
+       // result: (SUBD x y)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(uint32(d) >> uint64(c))
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSUBD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
+func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUB (MOVWconst [c]) x)
-       // cond:
-       // result: (RSBconst [c] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               return true
-       }
-       // match: (SUB x (MOVWconst [c]))
+       // match: (Sub8 x y)
        // cond:
-       // result: (SUBconst [c] x)
+       // result: (SUB x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpARMSUBconst)
-               v.AuxInt = c
+               y := v.Args[1]
+               v.reset(OpARMSUB)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SUB x x)
+}
+func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SubPtr x y)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (SUB x y)
        for {
                x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v.Args[1]
+               v.reset(OpARMSUB)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
+func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBS (MOVWconst [c]) x)
+       // match: (Trunc16to8 x)
        // cond:
-       // result: (RSBSconst [c] x)
+       // result: x
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpARMRSBSconst)
-               v.AuxInt = c
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
                return true
        }
-       // match: (SUBS x (MOVWconst [c]))
+}
+func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Trunc32to16 x)
        // cond:
-       // result: (SUBSconst [c] x)
+       // result: x
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpARMSUBSconst)
-               v.AuxInt = c
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBconst [0] x)
+       // match: (Trunc32to8 x)
        // cond:
        // result: x
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
                x := v.Args[0]
                v.reset(OpCopy)
                v.Type = x.Type
                v.AddArg(x)
                return true
        }
-       // match: (SUBconst [c] (MOVWconst [d]))
+}
+func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (XOR (MOVWconst [c]) x)
        // cond:
-       // result: (MOVWconst [int64(int32(d-c))])
+       // result: (XORconst [c] x)
        for {
-               c := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int32(d - c))
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMXORconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (SUBconst [c] (SUBconst [d] x))
+       // match: (XOR x (MOVWconst [c]))
        // cond:
-       // result: (ADDconst [int64(int32(-c-d))] x)
+       // result: (XORconst [c] x)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSUBconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMADDconst)
-               v.AuxInt = int64(int32(-c - d))
+               c := v_1.AuxInt
+               v.reset(OpARMXORconst)
+               v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (SUBconst [c] (ADDconst [d] x))
+       // match: (XOR x (SLLconst [c] y))
        // cond:
-       // result: (ADDconst [int64(int32(-c+d))] x)
+       // result: (XORshiftLL x y [c])
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMADDconst)
-               v.AuxInt = int64(int32(-c + d))
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMXORshiftLL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (SUBconst [c] (RSBconst [d] x))
+       // match: (XOR (SLLconst [c] y) x)
        // cond:
-       // result: (RSBconst [int64(int32(-c+d))] x)
+       // result: (XORshiftLL x y [c])
        for {
-               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMRSBconst {
+               if v_0.Op != OpARMSLLconst {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = int64(int32(-c + d))
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMXORshiftLL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       return false
-}
-func rewriteValueARM_OpSelect0(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Select0 <t> x)
-       // cond: t.IsFlags()
-       // result: (Carry x)
+       // match: (XOR x (SRLconst [c] y))
+       // cond:
+       // result: (XORshiftRL x y [c])
        for {
-               t := v.Type
                x := v.Args[0]
-               if !(t.IsFlags()) {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMCarry)
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMXORshiftRL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       // match: (Select0 <t> x)
-       // cond: !t.IsFlags()
-       // result: (LoweredSelect0 x)
+       // match: (XOR (SRLconst [c] y) x)
+       // cond:
+       // result: (XORshiftRL x y [c])
        for {
-               t := v.Type
-               x := v.Args[0]
-               if !(!t.IsFlags()) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMLoweredSelect0)
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMXORshiftRL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-       return false
-}
-func rewriteValueARM_OpSelect1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Select1 x)
+       // match: (XOR x (SRAconst [c] y))
        // cond:
-       // result: (LoweredSelect1 x)
+       // result: (XORshiftRA x y [c])
        for {
                x := v.Args[0]
-               v.reset(OpARMLoweredSelect1)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMXORshiftRA)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SignExt16to32 x)
+       // match: (XOR (SRAconst [c] y) x)
        // cond:
-       // result: (MOVHreg x)
+       // result: (XORshiftRA x y [c])
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVHreg)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMXORshiftRA)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SignExt8to16 x)
+       // match: (XOR x (SLL y z))
        // cond:
-       // result: (MOVBreg x)
+       // result: (XORshiftLLreg x y z)
        for {
                x := v.Args[0]
-               v.reset(OpARMMOVBreg)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMXORshiftLLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SignExt8to32 x)
+       // match: (XOR (SLL y z) x)
        // cond:
-       // result: (MOVBreg x)
+       // result: (XORshiftLLreg x y z)
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVBreg)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMXORshiftLLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Signmask x)
+       // match: (XOR x (SRL y z))
        // cond:
-       // result: (SRAconst x [31])
+       // result: (XORshiftRLreg x y z)
        for {
                x := v.Args[0]
-               v.reset(OpARMSRAconst)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMXORshiftRLreg)
                v.AddArg(x)
-               v.AuxInt = 31
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Sqrt x)
+       // match: (XOR (SRL y z) x)
        // cond:
-       // result: (SQRTD x)
+       // result: (XORshiftRLreg x y z)
        for {
-               x := v.Args[0]
-               v.reset(OpARMSQRTD)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMXORshiftRLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (StaticCall [argwid] {target} mem)
-       // cond:
-       // result: (CALLstatic [argwid] {target} mem)
-       for {
-               argwid := v.AuxInt
-               target := v.Aux
-               mem := v.Args[0]
-               v.reset(OpARMCALLstatic)
-               v.AuxInt = argwid
-               v.Aux = target
-               v.AddArg(mem)
-               return true
-       }
-}
-func rewriteValueARM_OpStore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Store [1] ptr val mem)
+       // match: (XOR x (SRA y z))
        // cond:
-       // result: (MOVBstore ptr val mem)
+       // result: (XORshiftRAreg x y z)
        for {
-               if v.AuxInt != 1 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
                        break
                }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVBstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMXORshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (Store [2] ptr val mem)
+       // match: (XOR (SRA y z) x)
        // cond:
-       // result: (MOVHstore ptr val mem)
+       // result: (XORshiftRAreg x y z)
        for {
-               if v.AuxInt != 2 {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRA {
                        break
                }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVHstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMXORshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (Store [4] ptr val mem)
-       // cond: !is32BitFloat(val.Type)
-       // result: (MOVWstore ptr val mem)
+       // match: (XOR x x)
+       // cond:
+       // result: (MOVWconst [0])
        for {
-               if v.AuxInt != 4 {
-                       break
-               }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(!is32BitFloat(val.Type)) {
+               x := v.Args[0]
+               if x != v.Args[1] {
                        break
                }
-               v.reset(OpARMMOVWstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (Store [4] ptr val mem)
-       // cond: is32BitFloat(val.Type)
-       // result: (MOVFstore ptr val mem)
+       return false
+}
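All of the new XOR cases above share one shape: when an operand of the bitwise op is a constant shift (SLLconst, SRLconst, SRAconst) or a register shift (SLL, SRL, SRA), in either operand order, the pair is fused into a single shifted-operand op such as XORshiftLL, which the backend can emit as one EOR with a shifted second operand. A minimal sketch of that matching step on toy types (value and fuseXORShiftLL are illustrative stand-ins, not the compiler's ssa.Value API):

package main

import "fmt"

type value struct {
	op   string
	aux  int64
	args []*value
}

// fuseXORShiftLL rewrites XOR(x, SLLconst[c](y)), in either operand order,
// into XORshiftLL[c](x, y), mirroring the generated matchers above.
func fuseXORShiftLL(v *value) bool {
	if v.op != "XOR" || len(v.args) != 2 {
		return false
	}
	x, s := v.args[0], v.args[1]
	if s.op != "SLLconst" {
		x, s = s, x // the rules also match the mirrored operand order
	}
	if s.op != "SLLconst" {
		return false
	}
	v.op, v.aux, v.args = "XORshiftLL", s.aux, []*value{x, s.args[0]}
	return true
}

func main() {
	y := &value{op: "Arg"}
	x := &value{op: "Arg"}
	v := &value{op: "XOR", args: []*value{x, {op: "SLLconst", aux: 2, args: []*value{y}}}}
	fmt.Println(fuseXORShiftLL(v), v.op, v.aux) // true XORshiftLL 2
}

The XORshiftLLreg and related *reg cases above apply the same idea when the shift amount lives in a register rather than in AuxInt.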
+func rewriteValueARM_OpARMXORconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (XORconst [0] x)
+       // cond:
+       // result: x
        for {
-               if v.AuxInt != 4 {
-                       break
-               }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32BitFloat(val.Type)) {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpARMMOVFstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (Store [8] ptr val mem)
-       // cond: is64BitFloat(val.Type)
-       // result: (MOVDstore ptr val mem)
+       // match: (XORconst [c] (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [c^d])
        for {
-               if v.AuxInt != 8 {
-                       break
-               }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is64BitFloat(val.Type)) {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVDstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c ^ d
                return true
        }
-       return false
-}
-func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Sub16 x y)
+       // match: (XORconst [c] (XORconst [d] x))
        // cond:
-       // result: (SUB x y)
+       // result: (XORconst [c^d] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSUB)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMXORconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMXORconst)
+               v.AuxInt = c ^ d
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
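The XORconst folds above lean on two identities: x^0 = x, and c^(d^x) = (c^d)^x, so a chain of constant XORs collapses into a single AuxInt. A quick standalone check of the second identity (foldXORconst is an illustrative helper, not compiler code):

package main

import "fmt"

// foldXORconst compares applying two constant XORs in sequence with applying
// their combined constant once, as the XORconst [c] (XORconst [d] x) rule does.
func foldXORconst(c, d, x uint32) (chained, folded uint32) {
	return c ^ (d ^ x), (c ^ d) ^ x
}

func main() {
	a, b := foldXORconst(0xF0F0F0F0, 0x0FF00FF0, 0x12345678)
	fmt.Printf("%#x %#x equal=%v\n", a, b, a == b) // equal=true
}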
-func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub32 x y)
+       // match: (XORshiftLL (MOVWconst [c]) x [d])
        // cond:
-       // result: (SUB x y)
+       // result: (XORconst [c] (SLLconst <x.Type> x [d]))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSUB)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMXORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Sub32F x y)
+       // match: (XORshiftLL x (MOVWconst [c]) [d])
        // cond:
-       // result: (SUBF x y)
+       // result: (XORconst x [int64(uint32(c)<<uint64(d))])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSUBF)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMXORconst)
                v.AddArg(x)
-               v.AddArg(y)
+               v.AuxInt = int64(uint32(c) << uint64(d))
                return true
        }
-}
-func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Sub32carry x y)
-       // cond:
-       // result: (SUBS x y)
+       // match: (XORshiftLL x (SLLconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSUBS)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               if x != v_1.Args[0] {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
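When the shifted operand of XORshiftLL is itself a constant, the case above folds the whole operand into XORconst by computing the shift in 32 bits and widening the result back into the 64-bit AuxInt, so the wraparound matches what the 32-bit ARM instruction would produce. A small standalone check of that computation (foldShiftLL is an illustrative helper):

package main

import "fmt"

// foldShiftLL reproduces the AuxInt expression int64(uint32(c) << uint64(d))
// used above: high bits shifted out of 32 bits are discarded before widening.
func foldShiftLL(c, d int64) int64 {
	return int64(uint32(c) << uint64(d))
}

func main() {
	fmt.Printf("%#x\n", foldShiftLL(0xFFFFFFFF, 4)) // 0xfffffff0, not 0xffffffff0
}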
-func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub32withcarry x y c)
+       // match: (XORshiftLLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (SBC x y c)
+       // result: (XORconst [c] (SLL <x.Type> x y))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               c := v.Args[2]
-               v.reset(OpARMSBC)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(c)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMXORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Sub64F x y)
+       // match: (XORshiftLLreg x y (MOVWconst [c]))
        // cond:
-       // result: (SUBD x y)
+       // result: (XORshiftLL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSUBD)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMXORshiftLL)
                v.AddArg(x)
                v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub8 x y)
+       // match: (XORshiftRA (MOVWconst [c]) x [d])
        // cond:
-       // result: (SUB x y)
+       // result: (XORconst [c] (SRAconst <x.Type> x [d]))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSUB)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               d := v.AuxInt
+               v.reset(OpARMXORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SubPtr x y)
+       // match: (XORshiftRA x (MOVWconst [c]) [d])
        // cond:
-       // result: (SUB x y)
+       // result: (XORconst x [int64(int32(c)>>uint64(d))])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSUB)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               v.reset(OpARMXORconst)
                v.AddArg(x)
-               v.AddArg(y)
+               v.AuxInt = int64(int32(c) >> uint64(d))
                return true
        }
-}
-func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Trunc16to8 x)
-       // cond:
-       // result: x
+       // match: (XORshiftRA x (SRAconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
        for {
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               if x != v_1.Args[0] {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
-func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRAreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Trunc32to16 x)
+       // match: (XORshiftRAreg (MOVWconst [c]) x y)
        // cond:
-       // result: x
+       // result: (XORconst [c] (SRA <x.Type> x y))
        for {
-               x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMXORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Trunc32to8 x)
+       // match: (XORshiftRAreg x y (MOVWconst [c]))
        // cond:
-       // result: x
+       // result: (XORshiftRA x y [c])
        for {
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMXORshiftRA)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
+       return false
 }
-func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (XOR (MOVWconst [c]) x)
+       // match: (XORshiftRL (MOVWconst [c]) x [d])
        // cond:
-       // result: (XORconst [c] x)
+       // result: (XORconst [c] (SRLconst <x.Type> x [d]))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -7933,14 +16409,18 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
                }
                c := v_0.AuxInt
                x := v.Args[1]
+               d := v.AuxInt
                v.reset(OpARMXORconst)
                v.AuxInt = c
-               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AddArg(x)
+               v0.AuxInt = d
+               v.AddArg(v0)
                return true
        }
-       // match: (XOR x (MOVWconst [c]))
+       // match: (XORshiftRL x (MOVWconst [c]) [d])
        // cond:
-       // result: (XORconst [c] x)
+       // result: (XORconst x [int64(uint32(c)>>uint64(d))])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -7948,17 +16428,27 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
                        break
                }
                c := v_1.AuxInt
+               d := v.AuxInt
                v.reset(OpARMXORconst)
-               v.AuxInt = c
                v.AddArg(x)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
                return true
        }
-       // match: (XOR x x)
-       // cond:
+       // match: (XORshiftRL x (SRLconst x [c]) [d])
+       // cond: c==d
        // result: (MOVWconst [0])
        for {
                x := v.Args[0]
-               if x != v.Args[1] {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               if x != v_1.Args[0] {
+                       break
+               }
+               c := v_1.AuxInt
+               d := v.AuxInt
+               if !(c == d) {
                        break
                }
                v.reset(OpARMMOVWconst)
@@ -7967,50 +16457,43 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
        }
        return false
 }
-func rewriteValueARM_OpARMXORconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (XORconst [0] x)
-       // cond:
-       // result: x
-       for {
-               if v.AuxInt != 0 {
-                       break
-               }
-               x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORconst [c] (MOVWconst [d]))
+       // match: (XORshiftRLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (MOVWconst [c^d])
+       // result: (XORconst [c] (SRL <x.Type> x y))
        for {
-               c := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = c ^ d
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMXORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (XORconst [c] (XORconst [d] x))
+       // match: (XORshiftRLreg x y (MOVWconst [c]))
        // cond:
-       // result: (XORconst [c^d] x)
+       // result: (XORshiftRL x y [c])
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMXORconst {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMXORconst)
-               v.AuxInt = c ^ d
+               c := v_2.AuxInt
+               v.reset(OpARMXORshiftRL)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AuxInt = c
                return true
        }
        return false
@@ -8365,11 +16848,16 @@ func rewriteValueARM_OpZeromask(v *Value, config *Config) bool {
        _ = b
        // match: (Zeromask x)
        // cond:
-       // result: (LoweredZeromask x)
+       // result: (SRAconst (RSBshiftRL <config.fe.TypeInt32()> x x [1]) [31])
        for {
                x := v.Args[0]
-               v.reset(OpARMLoweredZeromask)
-               v.AddArg(x)
+               v.reset(OpARMSRAconst)
+               v0 := b.NewValue0(v.Line, OpARMRSBshiftRL, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v0.AuxInt = 1
+               v.AddArg(v0)
+               v.AuxInt = 31
                return true
        }
 }
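The new Zeromask lowering above replaces the old LoweredZeromask pseudo-op with two ordinary ops: a reverse subtract of x from x shifted right (unsigned) by 1, then an arithmetic shift right by 31. Since (x >>u 1) - x is negative exactly when x is nonzero, the result is 0 for x == 0 and all ones otherwise. A standalone arithmetic check of that claim; reading RSBshiftRL x x [1] as (x >>u 1) - x is inferred here from RSB being ARM's reverse subtract, and zeromask is an illustrative helper:

package main

import "fmt"

// zeromask mirrors the lowering above: RSBshiftRL x x [1] taken as
// (x >>u 1) - x, followed by SRAconst [31] to smear the sign bit.
func zeromask(x uint32) uint32 {
	diff := int32(x>>1) - int32(x) // negative iff x != 0
	return uint32(diff >> 31)      // 0 if x == 0, 0xffffffff otherwise
}

func main() {
	fmt.Printf("%#x %#x %#x\n", zeromask(0), zeromask(1), zeromask(1<<31))
	// 0x0 0xffffffff 0xffffffff
}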