[dev.ssa] cmd/compile: add some ARM64 optimizations
author Cherry Zhang <cherryyz@google.com>
Wed, 3 Aug 2016 13:56:36 +0000 (09:56 -0400)
committer Cherry Zhang <cherryyz@google.com>
Thu, 11 Aug 2016 18:08:47 +0000 (18:08 +0000)
Mostly mirrors ARM, includes:
- constant folding
- simplification of load, store, extension, and arithmetic ops
- nilcheck removal

Change-Id: Iffaa5fcdce100fe327429ecab316cb395e543469
Reviewed-on: https://go-review.googlesource.com/26710
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
src/cmd/compile/internal/arm64/prog.go
src/cmd/compile/internal/arm64/ssa.go
src/cmd/compile/internal/ssa/gen/ARM64.rules
src/cmd/compile/internal/ssa/gen/ARM64Ops.go
src/cmd/compile/internal/ssa/opGen.go
src/cmd/compile/internal/ssa/rewriteARM64.go

index 49aaa64c37a444ebce5015895f6ee87902ba24e4..783a371015762274f57ba15e6fdb6122f65d903d 100644 (file)
@@ -48,6 +48,7 @@ var progtable = [arm64.ALAST & obj.AMask]obj.ProgInfo{
        arm64.AAND & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
        arm64.AORR & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
        arm64.AEOR & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+       arm64.ABIC & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
        arm64.AMVN & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite},
        arm64.AMUL & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
        arm64.AMULW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
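
The new progtable entry describes BIC (bit clear: AND with the complement of the second operand) to the legacy code generator, with the same flags as the other three-register logical ops, so the rules below may emit it. A minimal sketch of Go source that should end up as a single BIC, assuming the (AND x (MVN y)) -> (BIC x y) rule below fires (the function name is illustrative):

    package p

    // Go's AND-NOT operator lowers to AND(x, MVN(y)) on arm64, which
    // the new rule collapses into one BIC instruction.
    func AndNot(x, y uint64) uint64 {
            return x &^ y
    }
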
index 52580242dc02435a3e295ef7430eec5ad95b457f..24281300aef0cf0fac2ea68d0d3c588243884f86 100644 (file)
@@ -182,6 +182,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                p.From.Reg = x
                p.To.Type = obj.TYPE_REG
                p.To.Reg = y
+       case ssa.OpARM64MOVDnop:
+               if gc.SSARegNum(v) != gc.SSARegNum(v.Args[0]) {
+                       v.Fatalf("input[0] and output not in same register %s", v.LongString())
+               }
+               // nothing to do
        case ssa.OpLoadReg:
                if v.Type.IsFlags() {
                        v.Unimplementedf("load flags not implemented: %v", v.LongString())
@@ -581,6 +586,64 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                        gc.Maxarg = v.AuxInt
                }
        case ssa.OpARM64LoweredNilCheck:
+               // Optimization - if the subsequent block has a load or store
+               // at the same address, we don't need to issue this instruction.
+               mem := v.Args[1]
+               for _, w := range v.Block.Succs[0].Block().Values {
+                       if w.Op == ssa.OpPhi {
+                               if w.Type.IsMemory() {
+                                       mem = w
+                               }
+                               continue
+                       }
+                       if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
+                               // w doesn't use a store - can't be a memory op.
+                               continue
+                       }
+                       if w.Args[len(w.Args)-1] != mem {
+                               v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
+                       }
+                       switch w.Op {
+                       case ssa.OpARM64MOVBload, ssa.OpARM64MOVBUload, ssa.OpARM64MOVHload, ssa.OpARM64MOVHUload,
+                               ssa.OpARM64MOVWload, ssa.OpARM64MOVWUload, ssa.OpARM64MOVDload,
+                               ssa.OpARM64FMOVSload, ssa.OpARM64FMOVDload,
+                               ssa.OpARM64MOVBstore, ssa.OpARM64MOVHstore, ssa.OpARM64MOVWstore, ssa.OpARM64MOVDstore,
+                               ssa.OpARM64FMOVSstore, ssa.OpARM64FMOVDstore:
+                               // arg0 is ptr, auxint is offset
+                               if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
+                                       if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+                                               gc.Warnl(v.Line, "removed nil check")
+                                       }
+                                       return
+                               }
+                       case ssa.OpARM64DUFFZERO, ssa.OpARM64LoweredZero:
+                               // arg0 is ptr
+                               if w.Args[0] == v.Args[0] {
+                                       if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+                                               gc.Warnl(v.Line, "removed nil check")
+                                       }
+                                       return
+                               }
+                       case ssa.OpARM64LoweredMove:
+                               // arg0 is dst ptr, arg1 is src ptr
+                               if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
+                                       if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+                                               gc.Warnl(v.Line, "removed nil check")
+                                       }
+                                       return
+                               }
+                       default:
+                       }
+                       if w.Type.IsMemory() {
+                               if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
+                                       // these ops are OK
+                                       mem = w
+                                       continue
+                               }
+                               // We can't delay the nil check past the next store.
+                               break
+                       }
+               }
                // Issue a load which will fault if arg is nil.
                p := gc.Prog(arm64.AMOVB)
                p.From.Type = obj.TYPE_MEM
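
The MOVDnop case above deliberately emits nothing: the op exists only to retype its argument, and resultInArg0 in its definition (see ARM64Ops.go below) makes the register allocator place the result in the input's register, which the Fatalf merely double-checks. A buildable sketch where it should fire, assuming the (MOVDreg x) && x.Uses == 1 rule in ARM64.rules rewrites the retyping move away:

    package p

    // MOVB sign-extends to 64 bits as part of the load, so the explicit
    // extension becomes MOVDreg (a pure retyping) and then MOVDnop, and
    // the compiled body is just the load.
    func loadInt8(p *int8) int64 {
            return int64(*p)
    }
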
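The nil-check elision walks the values of the successor block in order, tracking the current memory state through phis and through VarDef/VarKill/VarLive, and gives up at the first store it cannot account for. A load or store through the checked pointer at a small non-negative offset (below minZeroPage) faults on nil by itself, so the explicit MOVB probe is redundant. A minimal example where the check should be removed (type and field names are illustrative):

    package p

    type pair struct {
            a, b int64 // b sits at offset 8, far below minZeroPage
    }

    // The MOVDload of p.b at offset 8 faults if p is nil, so the
    // preceding LoweredNilCheck can be dropped; with the checknil
    // debug flag the compiler reports "removed nil check" here.
    func getB(p *pair) int64 {
            return p.b
    }
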
index 9b80094f86c72e83a51d012447745ad4aa37fa4f..715bddee388f6ccb8f71a852d3dd35934127eac0 100644 (file)
 
 (If cond yes no) -> (NE (CMPconst [0] cond) yes no)
 
+// Optimizations
+
 // Absorb boolean tests into block
 (NE (CMPconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
 (NE (CMPconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
 (NE (CMPconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
 (NE (CMPconst [0] (GreaterEqualU cc)) yes no) -> (UGE cc yes no)
 
-// Optimizations
-
 // fold offset into address
 (ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) -> (MOVDaddr [off1+off2] {sym} ptr)
 
        (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
        (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+
+// replace load from same location as preceding store with copy
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) -> (MOVDreg x)
+(MOVBUreg x:(MOVBUload _ _)) -> (MOVDreg x)
+(MOVHreg x:(MOVBload _ _)) -> (MOVDreg x)
+(MOVHreg x:(MOVBUload _ _)) -> (MOVDreg x)
+(MOVHreg x:(MOVHload _ _)) -> (MOVDreg x)
+(MOVHUreg x:(MOVBUload _ _)) -> (MOVDreg x)
+(MOVHUreg x:(MOVHUload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVBload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVBUload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVHload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVHUload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVWload _ _)) -> (MOVDreg x)
+(MOVWUreg x:(MOVBUload _ _)) -> (MOVDreg x)
+(MOVWUreg x:(MOVHUload _ _)) -> (MOVDreg x)
+(MOVWUreg x:(MOVWUload _ _)) -> (MOVDreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) -> (MOVDreg x)
+(MOVBUreg x:(MOVBUreg _)) -> (MOVDreg x)
+(MOVHreg x:(MOVBreg _)) -> (MOVDreg x)
+(MOVHreg x:(MOVBUreg _)) -> (MOVDreg x)
+(MOVHreg x:(MOVHreg _)) -> (MOVDreg x)
+(MOVHUreg x:(MOVBUreg _)) -> (MOVDreg x)
+(MOVHUreg x:(MOVHUreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVBreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVBUreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVHreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVHUreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVWreg _)) -> (MOVDreg x)
+(MOVWUreg x:(MOVBUreg _)) -> (MOVDreg x)
+(MOVWUreg x:(MOVHUreg _)) -> (MOVDreg x)
+(MOVWUreg x:(MOVWUreg _)) -> (MOVDreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+
+// if a register move has only 1 use, just use the same register without emitting an instruction
+// MOVDnop doesn't emit an instruction; it exists only to carry the type.
+(MOVDreg x) && x.Uses == 1 -> (MOVDnop x)
+
+// fold constant into arithmetic ops
+(ADD (MOVDconst [c]) x) -> (ADDconst [c] x)
+(ADD x (MOVDconst [c])) -> (ADDconst [c] x)
+(SUB x (MOVDconst [c])) -> (SUBconst [c] x)
+(AND (MOVDconst [c]) x) -> (ANDconst [c] x)
+(AND x (MOVDconst [c])) -> (ANDconst [c] x)
+(OR  (MOVDconst [c]) x) -> (ORconst  [c] x)
+(OR  x (MOVDconst [c])) -> (ORconst  [c] x)
+(XOR (MOVDconst [c]) x) -> (XORconst [c] x)
+(XOR x (MOVDconst [c])) -> (XORconst [c] x)
+(BIC x (MOVDconst [c])) -> (BICconst [c] x)
+
+(SLL x (MOVDconst [c])) -> (SLLconst x [c&63]) // Note: I don't think we ever generate bad constant shifts (i.e. c>=64)
+(SRL x (MOVDconst [c])) -> (SRLconst x [c&63])
+(SRA x (MOVDconst [c])) -> (SRAconst x [c&63])
+
+(CMP x (MOVDconst [c])) -> (CMPconst [c] x)
+(CMP (MOVDconst [c]) x) -> (InvertFlags (CMPconst [c] x))
+(CMPW x (MOVDconst [c])) -> (CMPWconst [int64(int32(c))] x)
+(CMPW (MOVDconst [c]) x) -> (InvertFlags (CMPWconst [int64(int32(c))] x))
+
+// mul by constant
+(MUL x (MOVDconst [-1])) -> (NEG x)
+(MUL _ (MOVDconst [0])) -> (MOVDconst [0])
+(MUL x (MOVDconst [1])) -> x
+(MUL x (MOVDconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
+
+(MUL (MOVDconst [-1]) x) -> (NEG x)
+(MUL (MOVDconst [0]) _) -> (MOVDconst [0])
+(MUL (MOVDconst [1]) x) -> x
+(MUL (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
+
+(MULW x (MOVDconst [c])) && int32(c)==-1 -> (NEG x)
+(MULW _ (MOVDconst [c])) && int32(c)==0 -> (MOVDconst [0])
+(MULW x (MOVDconst [c])) && int32(c)==1 -> x
+(MULW x (MOVDconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
+
+(MULW (MOVDconst [c]) x) && int32(c)==-1 -> (NEG x)
+(MULW (MOVDconst [c]) _) && int32(c)==0 -> (MOVDconst [0])
+(MULW (MOVDconst [c]) x) && int32(c)==1 -> x
+(MULW (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
+
+// div by constant
+(UDIV x (MOVDconst [1])) -> x
+(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x)
+(UDIVW x (MOVDconst [c])) && uint32(c)==1 -> x
+(UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) -> (SRLconst [log2(c)] x)
+(UMOD _ (MOVDconst [1])) -> (MOVDconst [0])
+(UMOD x (MOVDconst [c])) && isPowerOfTwo(c) -> (ANDconst [c-1] x)
+(UMODW _ (MOVDconst [c])) && uint32(c)==1 -> (MOVDconst [0])
+(UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) -> (ANDconst [c-1] x)
+
+// generic simplifications
+(ADD x (NEG y)) -> (SUB x y)
+(ADD (NEG y) x) -> (SUB x y)
+(SUB x x) -> (MOVDconst [0])
+(AND x x) -> x
+(OR  x x) -> x
+(XOR x x) -> (MOVDconst [0])
+(BIC x x) -> (MOVDconst [0])
+(AND x (MVN y)) -> (BIC x y)
+
+// remove redundant *const ops
+(ADDconst [0]  x) -> x
+(SUBconst [0]  x) -> x
+(ANDconst [0]  _) -> (MOVDconst [0])
+(ANDconst [-1] x) -> x
+(ORconst  [0]  x) -> x
+(ORconst  [-1] _) -> (MOVDconst [-1])
+(XORconst [0]  x) -> x
+(XORconst [-1] x) -> (MVN x)
+(BICconst [0]  x) -> x
+(BICconst [-1] _) -> (MOVDconst [0])
+
+// generic constant folding
+(ADDconst [c] (MOVDconst [d]))  -> (MOVDconst [c+d])
+(ADDconst [c] (ADDconst [d] x)) -> (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) -> (ADDconst [c-d] x)
+(SUBconst [c] (MOVDconst [d]))  -> (MOVDconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) -> (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) -> (ADDconst [-c+d] x)
+(SLLconst [c] (MOVDconst [d]))  -> (MOVDconst [int64(d)<<uint64(c)])
+(SRLconst [c] (MOVDconst [d]))  -> (MOVDconst [int64(uint64(d)>>uint64(c))])
+(SRAconst [c] (MOVDconst [d]))  -> (MOVDconst [int64(d)>>uint64(c)])
+(MUL   (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c*d])
+(MULW  (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)*int32(d))])
+(DIV   (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(c)/int64(d)])
+(UDIV  (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint64(c)/uint64(d))])
+(DIVW  (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)/int32(d))])
+(UDIVW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint32(c)/uint32(d))])
+(MOD   (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(c)%int64(d)])
+(UMOD  (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint64(c)%uint64(d))])
+(MODW  (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)%int32(d))])
+(UMODW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVDconst [d]))  -> (MOVDconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
+(ORconst  [c] (MOVDconst [d]))  -> (MOVDconst [c|d])
+(ORconst  [c] (ORconst [d] x))  -> (ORconst [c|d] x)
+(XORconst [c] (MOVDconst [d]))  -> (MOVDconst [c^d])
+(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
+(BICconst [c] (MOVDconst [d]))  -> (MOVDconst [d&^c])
+(MVN (MOVDconst [c])) -> (MOVDconst [^c])
+(NEG (MOVDconst [c])) -> (MOVDconst [-c])
+(MOVBreg  (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
+(MOVBUreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))])
+(MOVHreg  (MOVDconst [c])) -> (MOVDconst [int64(int16(c))])
+(MOVHUreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))])
+(MOVWreg  (MOVDconst [c])) -> (MOVDconst [int64(int32(c))])
+(MOVWUreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])
+(MOVDreg  (MOVDconst [c])) -> (MOVDconst [c])
+
+// constant comparisons
+(CMPconst  (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
+(CMPconst  (MOVDconst [x]) [y]) && int64(x)<int64(y) && uint64(x)<uint64(y) -> (FlagLT_ULT)
+(CMPconst  (MOVDconst [x]) [y]) && int64(x)<int64(y) && uint64(x)>uint64(y) -> (FlagLT_UGT)
+(CMPconst  (MOVDconst [x]) [y]) && int64(x)>int64(y) && uint64(x)<uint64(y) -> (FlagGT_ULT)
+(CMPconst  (MOVDconst [x]) [y]) && int64(x)>int64(y) && uint64(x)>uint64(y) -> (FlagGT_UGT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
+
+// other known comparisons
+(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT_ULT)
+(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT_ULT)
+(CMPconst (MOVWUreg _) [c]) && 0xffffffff < c -> (FlagLT_ULT)
+(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
+(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT_ULT)
+(CMPWconst (MOVBUreg _) [c]) && 0xff < int32(c) -> (FlagLT_ULT)
+(CMPWconst (MOVHUreg _) [c]) && 0xffff < int32(c) -> (FlagLT_ULT)
+
+// absorb flag constants into branches
+(EQ (FlagEQ) yes no) -> (First nil yes no)
+(EQ (FlagLT_ULT) yes no) -> (First nil no yes)
+(EQ (FlagLT_UGT) yes no) -> (First nil no yes)
+(EQ (FlagGT_ULT) yes no) -> (First nil no yes)
+(EQ (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(NE (FlagEQ) yes no) -> (First nil no yes)
+(NE (FlagLT_ULT) yes no) -> (First nil yes no)
+(NE (FlagLT_UGT) yes no) -> (First nil yes no)
+(NE (FlagGT_ULT) yes no) -> (First nil yes no)
+(NE (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(LT (FlagEQ) yes no) -> (First nil no yes)
+(LT (FlagLT_ULT) yes no) -> (First nil yes no)
+(LT (FlagLT_UGT) yes no) -> (First nil yes no)
+(LT (FlagGT_ULT) yes no) -> (First nil no yes)
+(LT (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(LE (FlagEQ) yes no) -> (First nil yes no)
+(LE (FlagLT_ULT) yes no) -> (First nil yes no)
+(LE (FlagLT_UGT) yes no) -> (First nil yes no)
+(LE (FlagGT_ULT) yes no) -> (First nil no yes)
+(LE (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(GT (FlagEQ) yes no) -> (First nil no yes)
+(GT (FlagLT_ULT) yes no) -> (First nil no yes)
+(GT (FlagLT_UGT) yes no) -> (First nil no yes)
+(GT (FlagGT_ULT) yes no) -> (First nil yes no)
+(GT (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(GE (FlagEQ) yes no) -> (First nil yes no)
+(GE (FlagLT_ULT) yes no) -> (First nil no yes)
+(GE (FlagLT_UGT) yes no) -> (First nil no yes)
+(GE (FlagGT_ULT) yes no) -> (First nil yes no)
+(GE (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(ULT (FlagEQ) yes no) -> (First nil no yes)
+(ULT (FlagLT_ULT) yes no) -> (First nil yes no)
+(ULT (FlagLT_UGT) yes no) -> (First nil no yes)
+(ULT (FlagGT_ULT) yes no) -> (First nil yes no)
+(ULT (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(ULE (FlagEQ) yes no) -> (First nil yes no)
+(ULE (FlagLT_ULT) yes no) -> (First nil yes no)
+(ULE (FlagLT_UGT) yes no) -> (First nil no yes)
+(ULE (FlagGT_ULT) yes no) -> (First nil yes no)
+(ULE (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(UGT (FlagEQ) yes no) -> (First nil no yes)
+(UGT (FlagLT_ULT) yes no) -> (First nil no yes)
+(UGT (FlagLT_UGT) yes no) -> (First nil yes no)
+(UGT (FlagGT_ULT) yes no) -> (First nil no yes)
+(UGT (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(UGE (FlagEQ) yes no) -> (First nil yes no)
+(UGE (FlagLT_ULT) yes no) -> (First nil no yes)
+(UGE (FlagLT_UGT) yes no) -> (First nil yes no)
+(UGE (FlagGT_ULT) yes no) -> (First nil no yes)
+(UGE (FlagGT_UGT) yes no) -> (First nil yes no)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
+
+// absorb flag constants into boolean values
+(Equal (FlagEQ)) -> (MOVDconst [1])
+(Equal (FlagLT_ULT)) -> (MOVDconst [0])
+(Equal (FlagLT_UGT)) -> (MOVDconst [0])
+(Equal (FlagGT_ULT)) -> (MOVDconst [0])
+(Equal (FlagGT_UGT)) -> (MOVDconst [0])
+
+(NotEqual (FlagEQ)) -> (MOVDconst [0])
+(NotEqual (FlagLT_ULT)) -> (MOVDconst [1])
+(NotEqual (FlagLT_UGT)) -> (MOVDconst [1])
+(NotEqual (FlagGT_ULT)) -> (MOVDconst [1])
+(NotEqual (FlagGT_UGT)) -> (MOVDconst [1])
+
+(LessThan (FlagEQ)) -> (MOVDconst [0])
+(LessThan (FlagLT_ULT)) -> (MOVDconst [1])
+(LessThan (FlagLT_UGT)) -> (MOVDconst [1])
+(LessThan (FlagGT_ULT)) -> (MOVDconst [0])
+(LessThan (FlagGT_UGT)) -> (MOVDconst [0])
+
+(LessThanU (FlagEQ)) -> (MOVDconst [0])
+(LessThanU (FlagLT_ULT)) -> (MOVDconst [1])
+(LessThanU (FlagLT_UGT)) -> (MOVDconst [0])
+(LessThanU (FlagGT_ULT)) -> (MOVDconst [1])
+(LessThanU (FlagGT_UGT)) -> (MOVDconst [0])
+
+(LessEqual (FlagEQ)) -> (MOVDconst [1])
+(LessEqual (FlagLT_ULT)) -> (MOVDconst [1])
+(LessEqual (FlagLT_UGT)) -> (MOVDconst [1])
+(LessEqual (FlagGT_ULT)) -> (MOVDconst [0])
+(LessEqual (FlagGT_UGT)) -> (MOVDconst [0])
+
+(LessEqualU (FlagEQ)) -> (MOVDconst [1])
+(LessEqualU (FlagLT_ULT)) -> (MOVDconst [1])
+(LessEqualU (FlagLT_UGT)) -> (MOVDconst [0])
+(LessEqualU (FlagGT_ULT)) -> (MOVDconst [1])
+(LessEqualU (FlagGT_UGT)) -> (MOVDconst [0])
+
+(GreaterThan (FlagEQ)) -> (MOVDconst [0])
+(GreaterThan (FlagLT_ULT)) -> (MOVDconst [0])
+(GreaterThan (FlagLT_UGT)) -> (MOVDconst [0])
+(GreaterThan (FlagGT_ULT)) -> (MOVDconst [1])
+(GreaterThan (FlagGT_UGT)) -> (MOVDconst [1])
+
+(GreaterThanU (FlagEQ)) -> (MOVDconst [0])
+(GreaterThanU (FlagLT_ULT)) -> (MOVDconst [0])
+(GreaterThanU (FlagLT_UGT)) -> (MOVDconst [1])
+(GreaterThanU (FlagGT_ULT)) -> (MOVDconst [0])
+(GreaterThanU (FlagGT_UGT)) -> (MOVDconst [1])
+
+(GreaterEqual (FlagEQ)) -> (MOVDconst [1])
+(GreaterEqual (FlagLT_ULT)) -> (MOVDconst [0])
+(GreaterEqual (FlagLT_UGT)) -> (MOVDconst [0])
+(GreaterEqual (FlagGT_ULT)) -> (MOVDconst [1])
+(GreaterEqual (FlagGT_UGT)) -> (MOVDconst [1])
+
+(GreaterEqualU (FlagEQ)) -> (MOVDconst [1])
+(GreaterEqualU (FlagLT_ULT)) -> (MOVDconst [0])
+(GreaterEqualU (FlagLT_UGT)) -> (MOVDconst [1])
+(GreaterEqualU (FlagGT_ULT)) -> (MOVDconst [0])
+(GreaterEqualU (FlagGT_UGT)) -> (MOVDconst [1])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) -> (Equal x)
+(NotEqual (InvertFlags x)) -> (NotEqual x)
+(LessThan (InvertFlags x)) -> (GreaterThan x)
+(LessThanU (InvertFlags x)) -> (GreaterThanU x)
+(GreaterThan (InvertFlags x)) -> (LessThan x)
+(GreaterThanU (InvertFlags x)) -> (LessThanU x)
+(LessEqual (InvertFlags x)) -> (GreaterEqual x)
+(LessEqualU (InvertFlags x)) -> (GreaterEqualU x)
+(GreaterEqual (InvertFlags x)) -> (LessEqual x)
+(GreaterEqualU (InvertFlags x)) -> (LessEqualU x)
+
+// absorb flag constants into conditional instructions
+(CSELULT _ y (FlagEQ)) -> y
+(CSELULT x _ (FlagLT_ULT)) -> x
+(CSELULT _ y (FlagLT_UGT)) -> y
+(CSELULT x _ (FlagGT_ULT)) -> x
+(CSELULT _ y (FlagGT_UGT)) -> y
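
A few buildable sketches illustrate the rule groups above; all function names are illustrative, and the codegen claims assume the rules fire as written on this branch. First, the load-after-store rules: a load is replaced by the value just stored when the width, symbol, offset, and pointer all match and the store is the load's memory argument.

    package p

    // The reload of *p reads back through the MOVDstore it depends on,
    // so (MOVDload ... (MOVDstore ... x ...)) rewrites to x and the
    // second memory access disappears.
    func storeLoad(p *int64, x int64) int64 {
            *p = x
            return *p
    }
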
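The extension rules rely on ARM64 narrow loads already zero- or sign-extending to 64 bits, so an extension after a matching load, an extension already implied by a previous one, and an extension feeding a narrower store (which only writes the low bits) are all redundant; each rewrites to MOVDreg, which the MOVDnop rule then erases.

    package p

    // MOVHU zero-extends while loading, so the uint64 conversion
    // compiles to no extra instruction: MOVHUreg of a MOVHUload
    // becomes MOVDreg, then MOVDnop.
    func widen(p *uint16) uint64 {
            return uint64(*p)
    }
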
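Among the generic simplifications, (ADD x (NEG y)) -> (SUB x y) folds a negation into the subtraction that follows it.

    package p

    // One SUB instruction instead of NEG followed by ADD, assuming the
    // negation survives to the lowered form.
    func addNeg(x, y int64) int64 {
            return x + (-y)
    }
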
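Multiplication, unsigned division, and unsigned remainder by a power of two reduce to a shift or a mask; the signed variants are not handled here because they would need rounding adjustments. Earlier generic passes may already rewrite some of these, so read the annotations as the intent of the rules rather than a guarantee about any particular build.

    package p

    func mul8(x int64) int64   { return x * 8 } // SLLconst [3]
    func div8(x uint64) uint64 { return x / 8 } // SRLconst [3]
    func mod8(x uint64) uint64 { return x % 8 } // ANDconst [7]
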
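The constant-folding rules follow two's-complement semantics; in particular SRAconst folds with an arithmetic (sign-propagating) shift while SRLconst folds with a logical one, which is why the SRLconst rule routes through uint64. The difference, checked in plain Go:

    package main

    import "fmt"

    func main() {
            var d int64 = -8
            // (SRAconst [2] (MOVDconst [-8])): arithmetic shift keeps the sign.
            fmt.Println(d >> 2) // -2
            // (SRLconst [2] (MOVDconst [-8])): logical shift treats d as unsigned.
            fmt.Println(int64(uint64(d) >> 2)) // 4611686018427387902
    }
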
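A comparison of two constants folds to one of five flag states: FlagEQ, or FlagLT/FlagGT each paired with the unsigned outcome (ULT or UGT), because the signed and unsigned orders can disagree. For example, with x = -1 and y = 0, x is less than y signed but greater unsigned, so CMPconst yields FlagLT_UGT; (LT (FlagLT_UGT) yes no) then rewrites to take yes, while (ULT (FlagLT_UGT) yes no) takes no, and First discards the dead successor. The disagreement itself, in plain Go:

    package main

    import "fmt"

    func main() {
            x, y := int64(-1), int64(0)
            fmt.Println(x < y)                 // true  (signed order)
            fmt.Println(uint64(x) < uint64(y)) // false (unsigned order)
    }
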
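CSELULT selects its first operand when the unsigned-less-than condition holds and its second otherwise, so the five flag-constant cases read straight off that rule: the two _ULT states keep x, and FlagEQ and the two _UGT states keep y. A small model of the selection, with the flag passed as a bool for illustration:

    package p

    // cselULT models (CSELULT x y flags): x if unsigned-less-than holds,
    // y otherwise. With a known flag state the select collapses to one arg.
    func cselULT(x, y uint64, ult bool) uint64 {
            if ult {
                    return x
            }
            return y
    }
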
index 9c3453faaabcb85dd31e87307897550059c025d0..e30fcd63420a26908dfb7b3768db549159a09c07 100644 (file)
@@ -261,6 +261,8 @@ func init() {
                {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
                {name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOVD"},   // move from arg0
 
+               {name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
                {name: "SCVTFWS", argLength: 1, reg: gpfp, asm: "SCVTFWS"},   // int32 -> float32
                {name: "SCVTFWD", argLength: 1, reg: gpfp, asm: "SCVTFWD"},   // int32 -> float64
                {name: "UCVTFWS", argLength: 1, reg: gpfp, asm: "UCVTFWS"},   // uint32 -> float32
index b8d3b7eac5df505a01de55d16f88f7e6b1533998..38a2ba71360b66d10bca0950314e38717b6e6c90 100644 (file)
@@ -880,6 +880,7 @@ const (
        OpARM64MOVWreg
        OpARM64MOVWUreg
        OpARM64MOVDreg
+       OpARM64MOVDnop
        OpARM64SCVTFWS
        OpARM64SCVTFWD
        OpARM64UCVTFWS
@@ -10872,6 +10873,19 @@ var opcodeTable = [...]opInfo{
                        },
                },
        },
+       {
+               name:         "MOVDnop",
+               argLen:       1,
+               resultInArg0: true,
+               reg: regInfo{
+                       inputs: []inputInfo{
+                               {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+                       },
+                       outputs: []outputInfo{
+                               {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+                       },
+               },
+       },
        {
                name:   "SCVTFWS",
                argLen: 1,
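
For reference, the register mask 133955583 in the MOVDnop entry is the arm64 general-purpose mask: bits for R0 through R26 with bit 18 cleared, since R18 is the platform register and Go's arm64 port leaves it untouched (R27 and R28, the assembler temporary and the g register, lie above the mask's top bit). A quick check of the arithmetic:

    package main

    import "fmt"

    func main() {
            all := uint32(1)<<27 - 1 // bits for R0 through R26
            mask := all &^ (1 << 18) // drop R18, the platform register
            fmt.Println(mask)        // 133955583
    }
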
index e268f5907943eabd56a7f03905fe44dd8898811f..6350d1d6b01aa12d8a0c7bcf709ca0d7a39a6db6 100644 (file)
@@ -8,8 +8,34 @@ import "math"
 var _ = math.MinInt8 // in case not otherwise used
 func rewriteValueARM64(v *Value, config *Config) bool {
        switch v.Op {
+       case OpARM64ADD:
+               return rewriteValueARM64_OpARM64ADD(v, config)
        case OpARM64ADDconst:
                return rewriteValueARM64_OpARM64ADDconst(v, config)
+       case OpARM64AND:
+               return rewriteValueARM64_OpARM64AND(v, config)
+       case OpARM64ANDconst:
+               return rewriteValueARM64_OpARM64ANDconst(v, config)
+       case OpARM64BIC:
+               return rewriteValueARM64_OpARM64BIC(v, config)
+       case OpARM64BICconst:
+               return rewriteValueARM64_OpARM64BICconst(v, config)
+       case OpARM64CMP:
+               return rewriteValueARM64_OpARM64CMP(v, config)
+       case OpARM64CMPW:
+               return rewriteValueARM64_OpARM64CMPW(v, config)
+       case OpARM64CMPWconst:
+               return rewriteValueARM64_OpARM64CMPWconst(v, config)
+       case OpARM64CMPconst:
+               return rewriteValueARM64_OpARM64CMPconst(v, config)
+       case OpARM64CSELULT:
+               return rewriteValueARM64_OpARM64CSELULT(v, config)
+       case OpARM64DIV:
+               return rewriteValueARM64_OpARM64DIV(v, config)
+       case OpARM64DIVW:
+               return rewriteValueARM64_OpARM64DIVW(v, config)
+       case OpARM64Equal:
+               return rewriteValueARM64_OpARM64Equal(v, config)
        case OpARM64FMOVDload:
                return rewriteValueARM64_OpARM64FMOVDload(v, config)
        case OpARM64FMOVDstore:
@@ -18,28 +44,104 @@ func rewriteValueARM64(v *Value, config *Config) bool {
                return rewriteValueARM64_OpARM64FMOVSload(v, config)
        case OpARM64FMOVSstore:
                return rewriteValueARM64_OpARM64FMOVSstore(v, config)
+       case OpARM64GreaterEqual:
+               return rewriteValueARM64_OpARM64GreaterEqual(v, config)
+       case OpARM64GreaterEqualU:
+               return rewriteValueARM64_OpARM64GreaterEqualU(v, config)
+       case OpARM64GreaterThan:
+               return rewriteValueARM64_OpARM64GreaterThan(v, config)
+       case OpARM64GreaterThanU:
+               return rewriteValueARM64_OpARM64GreaterThanU(v, config)
+       case OpARM64LessEqual:
+               return rewriteValueARM64_OpARM64LessEqual(v, config)
+       case OpARM64LessEqualU:
+               return rewriteValueARM64_OpARM64LessEqualU(v, config)
+       case OpARM64LessThan:
+               return rewriteValueARM64_OpARM64LessThan(v, config)
+       case OpARM64LessThanU:
+               return rewriteValueARM64_OpARM64LessThanU(v, config)
+       case OpARM64MOD:
+               return rewriteValueARM64_OpARM64MOD(v, config)
+       case OpARM64MODW:
+               return rewriteValueARM64_OpARM64MODW(v, config)
        case OpARM64MOVBUload:
                return rewriteValueARM64_OpARM64MOVBUload(v, config)
+       case OpARM64MOVBUreg:
+               return rewriteValueARM64_OpARM64MOVBUreg(v, config)
        case OpARM64MOVBload:
                return rewriteValueARM64_OpARM64MOVBload(v, config)
+       case OpARM64MOVBreg:
+               return rewriteValueARM64_OpARM64MOVBreg(v, config)
        case OpARM64MOVBstore:
                return rewriteValueARM64_OpARM64MOVBstore(v, config)
        case OpARM64MOVDload:
                return rewriteValueARM64_OpARM64MOVDload(v, config)
+       case OpARM64MOVDreg:
+               return rewriteValueARM64_OpARM64MOVDreg(v, config)
        case OpARM64MOVDstore:
                return rewriteValueARM64_OpARM64MOVDstore(v, config)
        case OpARM64MOVHUload:
                return rewriteValueARM64_OpARM64MOVHUload(v, config)
+       case OpARM64MOVHUreg:
+               return rewriteValueARM64_OpARM64MOVHUreg(v, config)
        case OpARM64MOVHload:
                return rewriteValueARM64_OpARM64MOVHload(v, config)
+       case OpARM64MOVHreg:
+               return rewriteValueARM64_OpARM64MOVHreg(v, config)
        case OpARM64MOVHstore:
                return rewriteValueARM64_OpARM64MOVHstore(v, config)
        case OpARM64MOVWUload:
                return rewriteValueARM64_OpARM64MOVWUload(v, config)
+       case OpARM64MOVWUreg:
+               return rewriteValueARM64_OpARM64MOVWUreg(v, config)
        case OpARM64MOVWload:
                return rewriteValueARM64_OpARM64MOVWload(v, config)
+       case OpARM64MOVWreg:
+               return rewriteValueARM64_OpARM64MOVWreg(v, config)
        case OpARM64MOVWstore:
                return rewriteValueARM64_OpARM64MOVWstore(v, config)
+       case OpARM64MUL:
+               return rewriteValueARM64_OpARM64MUL(v, config)
+       case OpARM64MULW:
+               return rewriteValueARM64_OpARM64MULW(v, config)
+       case OpARM64MVN:
+               return rewriteValueARM64_OpARM64MVN(v, config)
+       case OpARM64NEG:
+               return rewriteValueARM64_OpARM64NEG(v, config)
+       case OpARM64NotEqual:
+               return rewriteValueARM64_OpARM64NotEqual(v, config)
+       case OpARM64OR:
+               return rewriteValueARM64_OpARM64OR(v, config)
+       case OpARM64ORconst:
+               return rewriteValueARM64_OpARM64ORconst(v, config)
+       case OpARM64SLL:
+               return rewriteValueARM64_OpARM64SLL(v, config)
+       case OpARM64SLLconst:
+               return rewriteValueARM64_OpARM64SLLconst(v, config)
+       case OpARM64SRA:
+               return rewriteValueARM64_OpARM64SRA(v, config)
+       case OpARM64SRAconst:
+               return rewriteValueARM64_OpARM64SRAconst(v, config)
+       case OpARM64SRL:
+               return rewriteValueARM64_OpARM64SRL(v, config)
+       case OpARM64SRLconst:
+               return rewriteValueARM64_OpARM64SRLconst(v, config)
+       case OpARM64SUB:
+               return rewriteValueARM64_OpARM64SUB(v, config)
+       case OpARM64SUBconst:
+               return rewriteValueARM64_OpARM64SUBconst(v, config)
+       case OpARM64UDIV:
+               return rewriteValueARM64_OpARM64UDIV(v, config)
+       case OpARM64UDIVW:
+               return rewriteValueARM64_OpARM64UDIVW(v, config)
+       case OpARM64UMOD:
+               return rewriteValueARM64_OpARM64UMOD(v, config)
+       case OpARM64UMODW:
+               return rewriteValueARM64_OpARM64UMODW(v, config)
+       case OpARM64XOR:
+               return rewriteValueARM64_OpARM64XOR(v, config)
+       case OpARM64XORconst:
+               return rewriteValueARM64_OpARM64XORconst(v, config)
        case OpAdd16:
                return rewriteValueARM64_OpAdd16(v, config)
        case OpAdd32:
@@ -519,7221 +621,11810 @@ func rewriteValueARM64(v *Value, config *Config) bool {
        }
        return false
 }
-func rewriteValueARM64_OpARM64ADDconst(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ADD(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr))
+       // match: (ADD (MOVDconst [c]) x)
        // cond:
-       // result: (MOVDaddr [off1+off2] {sym} ptr)
+       // result: (ADDconst [c] x)
        for {
-               off1 := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym := v_0.Aux
-               ptr := v_0.Args[0]
-               v.reset(OpARM64MOVDaddr)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARM64ADDconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       return false
-}
-func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (ADD x (MOVDconst [c]))
        // cond:
-       // result: (FMOVDload [off1+off2] {sym} ptr mem)
+       // result: (ADDconst [c] x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64FMOVDload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpARM64ADDconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (ADD x (NEG y))
+       // cond:
+       // result: (SUB x y)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64NEG {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               y := v_1.Args[0]
+               v.reset(OpARM64SUB)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+       // match: (ADD (NEG y) x)
+       // cond:
+       // result: (SUB x y)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64NEG {
                        break
                }
-               v.reset(OpARM64FMOVDload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARM64SUB)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
        return false
 }
-func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ADDconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr))
        // cond:
-       // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
+       // result: (MOVDaddr [off1+off2] {sym} ptr)
        for {
                off1 := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               if v_0.Op != OpARM64MOVDaddr {
                        break
                }
                off2 := v_0.AuxInt
+               sym := v_0.Aux
                ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64FMOVDstore)
+               v.reset(OpARM64MOVDaddr)
                v.AuxInt = off1 + off2
                v.Aux = sym
                v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
                return true
        }
-       // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (ADDconst [0]  x)
+       // cond:
+       // result: x
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               if v.AuxInt != 0 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (ADDconst [c] (MOVDconst [d]))
+       // cond:
+       // result: (MOVDconst [c+d])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64FMOVDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = c + d
                return true
        }
-       return false
-}
-func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (ADDconst [c] (ADDconst [d] x))
        // cond:
-       // result: (FMOVSload [off1+off2] {sym} ptr mem)
+       // result: (ADDconst [c+d] x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARM64ADDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64FMOVSload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARM64ADDconst)
+               v.AuxInt = c + d
+               v.AddArg(x)
                return true
        }
-       // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (ADDconst [c] (SUBconst [d] x))
+       // cond:
+       // result: (ADDconst [c-d] x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               if v_0.Op != OpARM64SUBconst {
                        break
                }
-               v.reset(OpARM64FMOVSload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARM64ADDconst)
+               v.AuxInt = c - d
+               v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64AND(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (AND (MOVDconst [c]) x)
        // cond:
-       // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
+       // result: (ANDconst [c] x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64FMOVSstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARM64ANDconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (AND x (MOVDconst [c]))
+       // cond:
+       // result: (ANDconst [c] x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               v.reset(OpARM64ANDconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (AND x x)
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               if x != v.Args[1] {
                        break
                }
-               v.reset(OpARM64FMOVSstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (AND x (MVN y))
+       // cond:
+       // result: (BIC x y)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MVN {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpARM64BIC)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
        return false
 }
-func rewriteValueARM64_OpARM64MOVBUload(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ANDconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (ANDconst [0]  _)
        // cond:
-       // result: (MOVBUload [off1+off2] {sym} ptr mem)
+       // result: (MOVDconst [0])
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               if v.AuxInt != 0 {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVBUload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (ANDconst [-1] x)
+       // cond:
+       // result: x
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               if v.AuxInt != -1 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (ANDconst [c] (MOVDconst [d]))
+       // cond:
+       // result: (MOVDconst [c&d])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVBUload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = c & d
+               return true
+       }
+       // match: (ANDconst [c] (ANDconst [d] x))
+       // cond:
+       // result: (ANDconst [c&d] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ANDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARM64ANDconst)
+               v.AuxInt = c & d
+               v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValueARM64_OpARM64MOVBload(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64BIC(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (BIC x (MOVDconst [c]))
        // cond:
-       // result: (MOVBload [off1+off2] {sym} ptr mem)
+       // result: (BICconst [c] x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVBload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpARM64BICconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (BIC x x)
+       // cond:
+       // result: (MOVDconst [0])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               x := v.Args[0]
+               if x != v.Args[1] {
                        break
                }
-               v.reset(OpARM64MOVBload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValueARM64_OpARM64MOVBstore(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64BICconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (BICconst [0]  x)
        // cond:
-       // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+       // result: x
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               if v.AuxInt != 0 {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64MOVBstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (BICconst [-1] _)
+       // cond:
+       // result: (MOVDconst [0])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               if v.AuxInt != -1 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (BICconst [c] (MOVDconst [d]))
+       // cond:
+       // result: (MOVDconst [d&^c])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVBstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = d &^ c
                return true
        }
        return false
 }
-func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64CMP(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (CMP x (MOVDconst [c]))
        // cond:
-       // result: (MOVDload [off1+off2] {sym} ptr mem)
+       // result: (CMPconst [c] x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVDload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpARM64CMPconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (CMP (MOVDconst [c]) x)
+       // cond:
+       // result: (InvertFlags (CMPconst [c] x))
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVDload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARM64InvertFlags)
+               v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v0.AuxInt = c
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
        return false
 }
-func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64CMPW(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (CMPW x (MOVDconst [c]))
        // cond:
-       // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+       // result: (CMPWconst [int64(int32(c))] x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64MOVDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpARM64CMPWconst)
+               v.AuxInt = int64(int32(c))
+               v.AddArg(x)
                return true
        }
-       // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (CMPW (MOVDconst [c]) x)
+       // cond:
+       // result: (InvertFlags (CMPWconst [int64(int32(c))] x))
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARM64InvertFlags)
+               v0 := b.NewValue0(v.Line, OpARM64CMPWconst, TypeFlags)
+               v0.AuxInt = int64(int32(c))
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
        return false
 }
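// Aside: a minimal sketch (not part of the generated rewriter) of why the
// CMPW rules store int64(int32(c)) in AuxInt rather than c itself: only the
// low 32 bits matter to a 32-bit compare, and the truncate-then-sign-extend
// round trip picks one canonical 64-bit representative for them, so two
// encodings of the same 32-bit constant produce identical AuxInts.

package main

import "fmt"

func main() {
	a := int64(0x100000005) // high bits differ...
	b := int64(5)           // ...but the low 32 bits agree
	fmt.Println(int64(int32(a)) == int64(int32(b))) // true
}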
-func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64CMPWconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
-       // cond:
-       // result: (MOVHUload [off1+off2] {sym} ptr mem)
+       // match: (CMPWconst (MOVDconst [x]) [y])
+       // cond: int32(x)==int32(y)
+       // result: (FlagEQ)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
+               y := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVHUload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v_0.AuxInt
+               if !(int32(x) == int32(y)) {
+                       break
+               }
+               v.reset(OpARM64FlagEQ)
                return true
        }
-       // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (CMPWconst (MOVDconst [x]) [y])
+       // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
+       // result: (FlagLT_ULT)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               y := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               x := v_0.AuxInt
+               if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
                        break
                }
-               v.reset(OpARM64MOVHUload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARM64FlagLT_ULT)
                return true
        }
-       return false
-}
-func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
-       // cond:
-       // result: (MOVHload [off1+off2] {sym} ptr mem)
+       // match: (CMPWconst (MOVDconst [x]) [y])
+       // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
+       // result: (FlagLT_UGT)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
+               y := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVHload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v_0.AuxInt
+               if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
+                       break
+               }
+               v.reset(OpARM64FlagLT_UGT)
                return true
        }
-       // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (CMPWconst (MOVDconst [x]) [y])
+       // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
+       // result: (FlagGT_ULT)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               y := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               x := v_0.AuxInt
+               if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
                        break
                }
-               v.reset(OpARM64MOVHload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARM64FlagGT_ULT)
                return true
        }
-       return false
-}
-func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-       // cond:
-       // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+       // match: (CMPWconst (MOVDconst [x]) [y])
+       // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
+       // result: (FlagGT_UGT)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
+               y := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64MOVHstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v_0.AuxInt
+               if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+                       break
+               }
+               v.reset(OpARM64FlagGT_UGT)
                return true
        }
-       // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (CMPWconst (MOVBUreg _) [c])
+       // cond: 0xff < int32(c)
+       // result: (FlagLT_ULT)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               if v_0.Op != OpARM64MOVBUreg {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               if !(0xff < int32(c)) {
                        break
                }
-               v.reset(OpARM64MOVHstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpARM64FlagLT_ULT)
+               return true
+       }
+       // match: (CMPWconst (MOVHUreg _) [c])
+       // cond: 0xffff < int32(c)
+       // result: (FlagLT_ULT)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVHUreg {
+                       break
+               }
+               if !(0xffff < int32(c)) {
+                       break
+               }
+               v.reset(OpARM64FlagLT_ULT)
                return true
        }
        return false
 }
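// Aside: an illustrative sketch, not part of this change. When the CMPWconst
// operand is itself a constant, the comparison collapses to one of five flag
// constants, keyed on the signed and unsigned orderings of the 32-bit
// truncations; the two trailing rules use the fact that a zero-extended byte
// or halfword can never reach a constant above 0xff or 0xffff. A sketch of
// the case split (names assumed, mirroring the rules above):

package main

import "fmt"

// flagForCMPWconst truncates both operands to 32 bits and classifies the
// comparison the way the five MOVDconst cases above do.
func flagForCMPWconst(x, y int64) string {
	sx, sy := int32(x), int32(y)
	ux, uy := uint32(x), uint32(y)
	switch {
	case sx == sy:
		return "FlagEQ"
	case sx < sy && ux < uy:
		return "FlagLT_ULT"
	case sx < sy && ux > uy:
		return "FlagLT_UGT"
	case sx > sy && ux < uy:
		return "FlagGT_ULT"
	default: // sx > sy && ux > uy
		return "FlagGT_UGT"
	}
}

func main() {
	fmt.Println(flagForCMPWconst(-1, 1)) // FlagLT_UGT: signed less, unsigned greater
}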
-func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64CMPconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
-       // cond:
-       // result: (MOVWUload [off1+off2] {sym} ptr mem)
+       // match: (CMPconst  (MOVDconst [x]) [y])
+       // cond: x==y
+       // result: (FlagEQ)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
+               y := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVWUload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v_0.AuxInt
+               if !(x == y) {
+                       break
+               }
+               v.reset(OpARM64FlagEQ)
                return true
        }
-       // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (CMPconst  (MOVDconst [x]) [y])
+       // cond: int64(x)<int64(y) && uint64(x)<uint64(y)
+       // result: (FlagLT_ULT)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               y := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               x := v_0.AuxInt
+               if !(int64(x) < int64(y) && uint64(x) < uint64(y)) {
                        break
                }
-               v.reset(OpARM64MOVWUload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARM64FlagLT_ULT)
                return true
        }
-       return false
-}
-func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
-       // cond:
-       // result: (MOVWload [off1+off2] {sym} ptr mem)
+       // match: (CMPconst  (MOVDconst [x]) [y])
+       // cond: int64(x)<int64(y) && uint64(x)>uint64(y)
+       // result: (FlagLT_UGT)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
+               y := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVWload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v_0.AuxInt
+               if !(int64(x) < int64(y) && uint64(x) > uint64(y)) {
+                       break
+               }
+               v.reset(OpARM64FlagLT_UGT)
                return true
        }
-       // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (CMPconst  (MOVDconst [x]) [y])
+       // cond: int64(x)>int64(y) && uint64(x)<uint64(y)
+       // result: (FlagGT_ULT)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               y := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               x := v_0.AuxInt
+               if !(int64(x) > int64(y) && uint64(x) < uint64(y)) {
                        break
                }
-               v.reset(OpARM64MOVWload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARM64FlagGT_ULT)
                return true
        }
-       return false
-}
-func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-       // cond:
-       // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+       // match: (CMPconst  (MOVDconst [x]) [y])
+       // cond: int64(x)>int64(y) && uint64(x)>uint64(y)
+       // result: (FlagGT_UGT)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
+               y := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64MOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v_0.AuxInt
+               if !(int64(x) > int64(y) && uint64(x) > uint64(y)) {
+                       break
+               }
+               v.reset(OpARM64FlagGT_UGT)
                return true
        }
-       // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (CMPconst (MOVBUreg _) [c])
+       // cond: 0xff < c
+       // result: (FlagLT_ULT)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               if v_0.Op != OpARM64MOVBUreg {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               if !(0xff < c) {
                        break
                }
-               v.reset(OpARM64MOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpARM64FlagLT_ULT)
+               return true
+       }
+       // match: (CMPconst (MOVHUreg _) [c])
+       // cond: 0xffff < c
+       // result: (FlagLT_ULT)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVHUreg {
+                       break
+               }
+               if !(0xffff < c) {
+                       break
+               }
+               v.reset(OpARM64FlagLT_ULT)
+               return true
+       }
+       // match: (CMPconst (MOVWUreg _) [c])
+       // cond: 0xffffffff < c
+       // result: (FlagLT_ULT)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVWUreg {
+                       break
+               }
+               if !(0xffffffff < c) {
+                       break
+               }
+               v.reset(OpARM64FlagLT_ULT)
+               return true
+       }
+       // match: (CMPconst (ANDconst _ [m]) [n])
+       // cond: 0 <= m && m < n
+       // result: (FlagLT_ULT)
+       for {
+               n := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ANDconst {
+                       break
+               }
+               m := v_0.AuxInt
+               if !(0 <= m && m < n) {
+                       break
+               }
+               v.reset(OpARM64FlagLT_ULT)
+               return true
+       }
+       // match: (CMPconst (SRLconst _ [c]) [n])
+       // cond: 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)
+       // result: (FlagLT_ULT)
+       for {
+               n := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64SRLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               if !(0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)) {
+                       break
+               }
+               v.reset(OpARM64FlagLT_ULT)
                return true
        }
        return false
 }
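// Aside: an illustrative note, not part of this change. The first five
// CMPconst rules are the 64-bit analogue of the CMPW cases above. The last
// four instead use value-range facts: a zero-extended byte, halfword, or
// word, an ANDconst-masked value, or a logical right shift each have a known
// unsigned upper bound, so comparing against a larger non-negative constant
// is decided statically as FlagLT_ULT. The SRLconst bound in particular:

package main

import "fmt"

func main() {
	// A logical right shift by c leaves at most 64-c significant bits, so
	// (x >> c) < 1<<(64-c) for every x; whenever 1<<(64-c) <= n, the
	// comparison against n is known at compile time. Worst-case check:
	var x uint64 = ^uint64(0) // all bits set
	c := uint64(40)
	fmt.Println(x>>c < uint64(1)<<(64-c)) // true
}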
-func rewriteValueARM64_OpAdd16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64CSELULT(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add16 x y)
+       // match: (CSELULT _ y (FlagEQ))
        // cond:
-       // result: (ADD x y)
+       // result: y
        for {
-               x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64ADD)
-               v.AddArg(x)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARM64FlagEQ {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = y.Type
                v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM64_OpAdd32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32 x y)
+       // match: (CSELULT x _ (FlagLT_ULT))
        // cond:
-       // result: (ADD x y)
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64ADD)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARM64FlagLT_ULT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM64_OpAdd32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32F x y)
+       // match: (CSELULT _ y (FlagLT_UGT))
        // cond:
-       // result: (FADDS x y)
+       // result: y
        for {
-               x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64FADDS)
-               v.AddArg(x)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARM64FlagLT_UGT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = y.Type
                v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM64_OpAdd64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add64 x y)
+       // match: (CSELULT x _ (FlagGT_ULT))
        // cond:
-       // result: (ADD x y)
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64ADD)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARM64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM64_OpAdd64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add64F x y)
+       // match: (CSELULT _ y (FlagGT_UGT))
        // cond:
-       // result: (FADDD x y)
+       // result: y
        for {
-               x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64FADDD)
-               v.AddArg(x)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARM64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = y.Type
                v.AddArg(y)
                return true
        }
+       return false
 }
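// Aside: an illustrative sketch, not part of this change. CSELULT selects
// its first operand when the flags encode unsigned-less-than and its second
// otherwise; once the flags input is one of the five constants, the select
// collapses to a plain copy of one operand, as the five rules above do.
// Decision table with assumed names:

package main

import "fmt"

// cselULT mirrors the folding above for a known flags constant.
func cselULT(x, y int64, flag string) int64 {
	switch flag {
	case "FlagLT_ULT", "FlagGT_ULT": // unsigned less-than holds
		return x
	default: // FlagEQ, FlagLT_UGT, FlagGT_UGT
		return y
	}
}

func main() {
	fmt.Println(cselULT(10, 20, "FlagEQ")) // 20: equal is not unsigned-less
}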
-func rewriteValueARM64_OpAdd8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64DIV(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add8 x y)
+       // match: (DIV   (MOVDconst [c]) (MOVDconst [d]))
        // cond:
-       // result: (ADD x y)
+       // result: (MOVDconst [int64(c)/int64(d)])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64ADD)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(c) / int64(d)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpAddPtr(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64DIVW(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (AddPtr x y)
+       // match: (DIVW  (MOVDconst [c]) (MOVDconst [d]))
        // cond:
-       // result: (ADD x y)
+       // result: (MOVDconst [int64(int32(c)/int32(d))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64ADD)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(int32(c) / int32(d))
                return true
        }
+       return false
 }
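// Aside: an illustrative sketch, not part of this change. DIV and DIVW fold
// a quotient of two constants at compile time; DIVW truncates both operands
// to 32 bits and widens the 32-bit quotient back into the int64 AuxInt. Note
// that the rules as shown carry no d != 0 condition, so the guard below is
// our own defensive addition, not something the rewriter does here.

package main

import "fmt"

// foldDIVW sketches the DIVW fold: truncate, divide, widen.
func foldDIVW(c, d int64) (int64, bool) {
	if int32(d) == 0 {
		return 0, false // cannot fold: zero divisor
	}
	return int64(int32(c) / int32(d)), true
}

func main() {
	fmt.Println(foldDIVW(-7, 2)) // -3 true: Go (and ARM64 SDIV) truncate toward zero
}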
-func rewriteValueARM64_OpAddr(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64Equal(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Addr {sym} base)
+       // match: (Equal (FlagEQ))
        // cond:
-       // result: (MOVDaddr {sym} base)
+       // result: (MOVDconst [1])
        for {
-               sym := v.Aux
-               base := v.Args[0]
-               v.reset(OpARM64MOVDaddr)
-               v.Aux = sym
-               v.AddArg(base)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagEQ {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpAnd16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And16 x y)
+       // match: (Equal (FlagLT_ULT))
        // cond:
-       // result: (AND x y)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64AND)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpAnd32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And32 x y)
+       // match: (Equal (FlagLT_UGT))
        // cond:
-       // result: (AND x y)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64AND)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpAnd64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And64 x y)
+       // match: (Equal (FlagGT_ULT))
        // cond:
-       // result: (AND x y)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64AND)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpAnd8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And8 x y)
+       // match: (Equal (FlagGT_UGT))
        // cond:
-       // result: (AND x y)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64AND)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpAndB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (AndB x y)
+       // match: (Equal (InvertFlags x))
        // cond:
-       // result: (AND x y)
+       // result: (Equal x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64AND)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64InvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARM64Equal)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
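// Aside: an illustrative sketch, not part of this change. The five
// flag-constant cases evaluate the equality at compile time (only FlagEQ
// yields 1), and the InvertFlags case drops the wrapper because equality is
// symmetric under operand swap. The GreaterEqual/GreaterThan/Less* functions
// that follow have the same shape, with InvertFlags mapping each predicate
// to its mirror instead of passing through.

package main

import "fmt"

// equalFromFlags mirrors the Equal cases above.
func equalFromFlags(flag string) int64 {
	if flag == "FlagEQ" {
		return 1
	}
	return 0 // any of the four LT/GT flag constants
}

func main() {
	fmt.Println(equalFromFlags("FlagLT_ULT")) // 0
}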
-func rewriteValueARM64_OpAvg64u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Avg64u <t> x y)
+       // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (ADD (ADD <t> (SRLconst <t> x [1]) (SRLconst <t> y [1])) (AND <t> (AND <t> x y) (MOVDconst [1])))
+       // result: (FMOVDload [off1+off2] {sym} ptr mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64ADD)
-               v0 := b.NewValue0(v.Line, OpARM64ADD, t)
-               v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
-               v1.AuxInt = 1
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpARM64SRLconst, t)
-               v2.AuxInt = 1
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpARM64AND, t)
-               v4 := b.NewValue0(v.Line, OpARM64AND, t)
-               v4.AddArg(x)
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v5 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v5.AuxInt = 1
-               v3.AddArg(v5)
-               v.AddArg(v3)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64FMOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpClosureCall(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ClosureCall [argwid] entry closure mem)
-       // cond:
-       // result: (CALLclosure [argwid] entry closure mem)
+       // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               closure := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64CALLclosure)
-               v.AuxInt = argwid
-               v.AddArg(entry)
-               v.AddArg(closure)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64FMOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
                v.AddArg(mem)
                return true
        }
+       // match: (FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64FMOVDstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       return false
 }
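// Aside: an illustrative sketch, not part of this change. The third FMOVDload
// rule is store-to-load forwarding: a load that reads back what the
// immediately preceding store wrote - same symbol, same offset, same base
// pointer - is replaced by the stored value, skipping the memory round trip.
// A sketch of that guard with illustrative stand-in types (the compiler's
// own check uses isSamePtr on SSA values):

package main

import "fmt"

type addr struct {
	sym string
	off int64
	ptr int // stand-in for base-pointer identity
}

// forwardable reports whether a load at l may reuse the value stored at s.
func forwardable(l, s addr) bool {
	return l.sym == s.sym && l.off == s.off && l.ptr == s.ptr
}

func main() {
	a := addr{"x", 8, 1}
	fmt.Println(forwardable(a, a)) // true: the load becomes a copy of the stored value
}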
-func rewriteValueARM64_OpCom16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Com16 x)
+       // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (MVN x)
+       // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
        for {
-               x := v.Args[0]
-               v.reset(OpARM64MVN)
-               v.AddArg(x)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64FMOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpCom32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com32 x)
-       // cond:
-       // result: (MVN x)
+       // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
        for {
-               x := v.Args[0]
-               v.reset(OpARM64MVN)
-               v.AddArg(x)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64FMOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpCom64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Com64 x)
+       // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (MVN x)
+       // result: (FMOVSload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               v.reset(OpARM64MVN)
-               v.AddArg(x)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64FMOVSload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpCom8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com8 x)
-       // cond:
-       // result: (MVN x)
+       // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               v.reset(OpARM64MVN)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64FMOVSload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64FMOVSstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpConst16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Const16 [val])
+       // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (MOVDconst [val])
+       // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
        for {
-               val := v.AuxInt
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64FMOVSstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64FMOVSstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpConst32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64GreaterEqual(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Const32 [val])
+       // match: (GreaterEqual (FlagEQ))
        // cond:
-       // result: (MOVDconst [val])
+       // result: (MOVDconst [1])
        for {
-               val := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagEQ {
+                       break
+               }
                v.reset(OpARM64MOVDconst)
-               v.AuxInt = val
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpConst32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const32F [val])
+       // match: (GreaterEqual (FlagLT_ULT))
        // cond:
-       // result: (FMOVSconst [val])
+       // result: (MOVDconst [0])
        for {
-               val := v.AuxInt
-               v.reset(OpARM64FMOVSconst)
-               v.AuxInt = val
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpConst64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const64 [val])
+       // match: (GreaterEqual (FlagLT_UGT))
        // cond:
-       // result: (MOVDconst [val])
+       // result: (MOVDconst [0])
        for {
-               val := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_UGT {
+                       break
+               }
                v.reset(OpARM64MOVDconst)
-               v.AuxInt = val
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpConst64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const64F [val])
+       // match: (GreaterEqual (FlagGT_ULT))
        // cond:
-       // result: (FMOVDconst [val])
+       // result: (MOVDconst [1])
        for {
-               val := v.AuxInt
-               v.reset(OpARM64FMOVDconst)
-               v.AuxInt = val
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpConst8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const8 [val])
+       // match: (GreaterEqual (FlagGT_UGT))
        // cond:
-       // result: (MOVDconst [val])
+       // result: (MOVDconst [1])
        for {
-               val := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_UGT {
+                       break
+               }
                v.reset(OpARM64MOVDconst)
-               v.AuxInt = val
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpConstBool(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ConstBool [b])
+       // match: (GreaterEqual (InvertFlags x))
        // cond:
-       // result: (MOVDconst [b])
+       // result: (LessEqual x)
        for {
-               b := v.AuxInt
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = b
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64InvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARM64LessEqual)
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpConstNil(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64GreaterEqualU(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ConstNil)
+       // match: (GreaterEqualU (FlagEQ))
+       // cond:
+       // result: (MOVDconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagEQ {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterEqualU (FlagLT_ULT))
        // cond:
        // result: (MOVDconst [0])
        for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_ULT {
+                       break
+               }
                v.reset(OpARM64MOVDconst)
                v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpConvert(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Convert x mem)
+       // match: (GreaterEqualU (FlagLT_UGT))
        // cond:
-       // result: (MOVDconvert x mem)
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVDconvert)
-               v.AddArg(x)
-               v.AddArg(mem)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpCvt32Fto32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Fto32 x)
+       // match: (GreaterEqualU (FlagGT_ULT))
        // cond:
-       // result: (FCVTZSSW x)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64FCVTZSSW)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpCvt32Fto32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Fto32U x)
+       // match: (GreaterEqualU (FlagGT_UGT))
        // cond:
-       // result: (FCVTZUSW x)
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64FCVTZUSW)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpCvt32Fto64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Fto64 x)
+       // match: (GreaterEqualU (InvertFlags x))
        // cond:
-       // result: (FCVTZSS x)
+       // result: (LessEqualU x)
        for {
-               x := v.Args[0]
-               v.reset(OpARM64FCVTZSS)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64InvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARM64LessEqualU)
                v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpCvt32Fto64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64GreaterThan(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32Fto64F x)
+       // match: (GreaterThan (FlagEQ))
        // cond:
-       // result: (FCVTSD x)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64FCVTSD)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagEQ {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpCvt32Uto32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Uto32F x)
+       // match: (GreaterThan (FlagLT_ULT))
        // cond:
-       // result: (UCVTFWS x)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64UCVTFWS)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpCvt32Uto64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Uto64F x)
+       // match: (GreaterThan (FlagLT_UGT))
        // cond:
-       // result: (UCVTFWD x)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64UCVTFWD)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpCvt32to32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32to32F x)
+       // match: (GreaterThan (FlagGT_ULT))
        // cond:
-       // result: (SCVTFWS x)
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64SCVTFWS)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpCvt32to64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32to64F x)
+       // match: (GreaterThan (FlagGT_UGT))
        // cond:
-       // result: (SCVTFWD x)
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64SCVTFWD)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpCvt64Fto32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto32 x)
+       // match: (GreaterThan (InvertFlags x))
        // cond:
-       // result: (FCVTZSDW x)
+       // result: (LessThan x)
        for {
-               x := v.Args[0]
-               v.reset(OpARM64FCVTZSDW)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64InvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARM64LessThan)
                v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpCvt64Fto32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64GreaterThanU(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt64Fto32F x)
+       // match: (GreaterThanU (FlagEQ))
        // cond:
-       // result: (FCVTDS x)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64FCVTDS)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagEQ {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpCvt64Fto32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto32U x)
+       // match: (GreaterThanU (FlagLT_ULT))
        // cond:
-       // result: (FCVTZUDW x)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64FCVTZUDW)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpCvt64Fto64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto64 x)
+       // match: (GreaterThanU (FlagLT_UGT))
        // cond:
-       // result: (FCVTZSD x)
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64FCVTZSD)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpCvt64to32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64to32F x)
+       // match: (GreaterThanU (FlagGT_ULT))
        // cond:
-       // result: (SCVTFS x)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64SCVTFS)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpCvt64to64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64to64F x)
+       // match: (GreaterThanU (FlagGT_UGT))
        // cond:
-       // result: (SCVTFD x)
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64SCVTFD)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterThanU (InvertFlags x))
+       // cond:
+       // result: (LessThanU x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64InvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARM64LessThanU)
                v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpDeferCall(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64LessEqual(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (DeferCall [argwid] mem)
+       // match: (LessEqual (FlagEQ))
        // cond:
-       // result: (CALLdefer [argwid] mem)
+       // result: (MOVDconst [1])
        for {
-               argwid := v.AuxInt
-               mem := v.Args[0]
-               v.reset(OpARM64CALLdefer)
-               v.AuxInt = argwid
-               v.AddArg(mem)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagEQ {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpDiv16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div16 x y)
+       // match: (LessEqual (FlagLT_ULT))
        // cond:
-       // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64DIVW)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpDiv16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div16u x y)
+       // match: (LessEqual (FlagLT_UGT))
        // cond:
-       // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UDIVW)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpDiv32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32 x y)
+       // match: (LessEqual (FlagGT_ULT))
        // cond:
-       // result: (DIVW x y)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64DIVW)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpDiv32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32F x y)
+       // match: (LessEqual (FlagGT_UGT))
        // cond:
-       // result: (FDIVS x y)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64FDIVS)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpDiv32u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32u x y)
+       // match: (LessEqual (InvertFlags x))
        // cond:
-       // result: (UDIVW x y)
+       // result: (GreaterEqual x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UDIVW)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64InvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARM64GreaterEqual)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
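+
+// Note on the Flag* folding above (illustrative): when both operands of
+// a comparison become constants, the CMP is first folded to one of the
+// flag constants FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT, FlagGT_UGT,
+// which record the signed (LT/GT) and unsigned (ULT/UGT) outcomes
+// together. LessEqual is the signed test, so both FlagLT_* states fold
+// to 1 and both FlagGT_* states fold to 0, deciding the boolean at
+// compile time.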
-func rewriteValueARM64_OpDiv64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64LessEqualU(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div64 x y)
+       // match: (LessEqualU (FlagEQ))
        // cond:
-       // result: (DIV x y)
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64DIV)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagEQ {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpDiv64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div64F x y)
+       // match: (LessEqualU (FlagLT_ULT))
        // cond:
-       // result: (FDIVD x y)
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64FDIVD)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpDiv64u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div64u x y)
+       // match: (LessEqualU (FlagLT_UGT))
        // cond:
-       // result: (UDIV x y)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UDIV)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpDiv8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div8 x y)
+       // match: (LessEqualU (FlagGT_ULT))
        // cond:
-       // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64DIVW)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpDiv8u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div8u x y)
+       // match: (LessEqualU (FlagGT_UGT))
        // cond:
-       // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UDIVW)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpEq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq16 x y)
+       // match: (LessEqualU (InvertFlags x))
        // cond:
-       // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (GreaterEqualU x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64InvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARM64GreaterEqualU)
+               v.AddArg(x)
                return true
        }
+       return false
 }
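+
+// LessEqualU is the unsigned counterpart: it reads the ULT/UGT half of
+// the encoding, which is why FlagGT_ULT (signed greater, unsigned less)
+// folds to 1 here but to 0 in the signed LessEqual above.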
-func rewriteValueARM64_OpEq32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64LessThan(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq32 x y)
+       // match: (LessThan (FlagEQ))
        // cond:
-       // result: (Equal (CMPW x y))
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagEQ {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpEq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq32F x y)
+       // match: (LessThan (FlagLT_ULT))
        // cond:
-       // result: (Equal (FCMPS x y))
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpEq64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq64 x y)
+       // match: (LessThan (FlagLT_UGT))
        // cond:
-       // result: (Equal (CMP x y))
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpEq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq64F x y)
+       // match: (LessThan (FlagGT_ULT))
        // cond:
-       // result: (Equal (FCMPD x y))
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValueARM64_OpEq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq8 x y)
-       // cond:
-       // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpEqB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (EqB x y)
+       // match: (LessThan (FlagGT_UGT))
        // cond:
-       // result: (XOR (MOVDconst [1]) (XOR <config.fe.TypeBool()> x y))
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64XOR)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 1
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64XOR, config.fe.TypeBool())
-               v1.AddArg(x)
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpEqPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (EqPtr x y)
+       // match: (LessThan (InvertFlags x))
        // cond:
-       // result: (Equal (CMP x y))
+       // result: (GreaterThan x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64InvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARM64GreaterThan)
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpGeq16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64LessThanU(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq16 x y)
+       // match: (LessThanU (FlagEQ))
        // cond:
-       // result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagEQ {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpGeq16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq16U x y)
+       // match: (LessThanU (FlagLT_ULT))
        // cond:
-       // result: (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpGeq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq32 x y)
+       // match: (LessThanU (FlagLT_UGT))
        // cond:
-       // result: (GreaterEqual (CMPW x y))
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpGeq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq32F x y)
+       // match: (LessThanU (FlagGT_ULT))
        // cond:
-       // result: (GreaterEqual (FCMPS x y))
+       // result: (MOVDconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM64_OpGeq32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq32U x y)
+       // match: (LessThanU (FlagGT_UGT))
        // cond:
-       // result: (GreaterEqualU (CMPW x y))
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpGeq64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq64 x y)
+       // match: (LessThanU (InvertFlags x))
        // cond:
-       // result: (GreaterEqual (CMP x y))
+       // result: (GreaterThanU x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64InvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARM64GreaterThanU)
+               v.AddArg(x)
                return true
        }
+       return false
 }
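+
+// The (InvertFlags x) cases in the predicates above use the identity
+// that InvertFlags stands for flags computed with the comparison
+// operands swapped, so each predicate maps to its mirror image, e.g.
+// (sketch in the notation of the rules file):
+//
+//      (LessThan  (InvertFlags x)) -> (GreaterThan  x)
+//      (LessThanU (InvertFlags x)) -> (GreaterThanU x)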
-func rewriteValueARM64_OpGeq64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOD(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq64F x y)
+       // match: (MOD   (MOVDconst [c]) (MOVDconst [d]))
        // cond:
-       // result: (GreaterEqual (FCMPD x y))
+       // result: (MOVDconst [int64(c)%int64(d)])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(c) % int64(d)
                return true
        }
+       return false
 }
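+
+// Worked example (sketch): a 64-bit modulus of two constants folds away
+// entirely; (MOD (MOVDconst [-7]) (MOVDconst [3])) becomes
+// (MOVDconst [-1]), matching Go's truncated division: -7 % 3 == -1.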
-func rewriteValueARM64_OpGeq64U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MODW(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq64U x y)
+       // match: (MODW  (MOVDconst [c]) (MOVDconst [d]))
        // cond:
-       // result: (GreaterEqualU (CMP x y))
+       // result: (MOVDconst [int64(int32(c)%int32(d))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(int32(c) % int32(d))
                return true
        }
+       return false
 }
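+
+// Worked example (sketch): MODW folds in 32-bit arithmetic, so operands
+// are truncated first; with c = 1<<33 + 7 and d = 3, int32(c) == 7 and
+// the rule produces (MOVDconst [1]), since 7 % 3 == 1.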
-func rewriteValueARM64_OpGeq8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVBUload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq8 x y)
+       // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+       // result: (MOVBUload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVBUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpGeq8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq8U x y)
-       // cond:
-       // result: (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVBUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpGetClosurePtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (GetClosurePtr)
-       // cond:
-       // result: (LoweredGetClosurePtr)
+       // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               v.reset(OpARM64LoweredGetClosurePtr)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVBstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
+       return false
 }
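+
+// The three MOVBUload cases above are the load simplifications repeated
+// for every width below: fold an ADDconst into the load's offset, merge
+// a MOVDaddr's symbol and offset into the aux fields when canMergeSym
+// permits, and forward a just-stored value to a load from the same
+// address (store-to-load forwarding), which deletes the load entirely.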
-func rewriteValueARM64_OpGoCall(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVBUreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (GoCall [argwid] mem)
+       // match: (MOVBUreg x:(MOVBUload _ _))
        // cond:
-       // result: (CALLgo [argwid] mem)
+       // result: (MOVDreg x)
        for {
-               argwid := v.AuxInt
-               mem := v.Args[0]
-               v.reset(OpARM64CALLgo)
-               v.AuxInt = argwid
-               v.AddArg(mem)
+               x := v.Args[0]
+               if x.Op != OpARM64MOVBUload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpGreater16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater16 x y)
+       // match: (MOVBUreg x:(MOVBUreg _))
        // cond:
-       // result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x.Op != OpARM64MOVBUreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpGreater16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater16U x y)
+       // match: (MOVBUreg (MOVDconst [c]))
        // cond:
-       // result: (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (MOVDconst [int64(uint8(c))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(uint8(c))
                return true
        }
+       return false
 }
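+
+// Worked example (sketch): MOVBUload already zero-extends to 64 bits,
+// so a following MOVBUreg degrades to a plain register move (MOVDreg).
+// The constant case folds the extension itself:
+// (MOVBUreg (MOVDconst [-1])) -> (MOVDconst [255]): truncating
+// c == -1 to uint8 gives 255.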
-func rewriteValueARM64_OpGreater32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVBload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater32 x y)
+       // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (GreaterThan (CMPW x y))
+       // result: (MOVBload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpGreater32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater32F x y)
-       // cond:
-       // result: (GreaterThan (FCMPS x y))
+       // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpGreater32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater32U x y)
-       // cond:
-       // result: (GreaterThanU (CMPW x y))
+       // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVBstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpGreater64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVBreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater64 x y)
+       // match: (MOVBreg x:(MOVBload _ _))
        // cond:
-       // result: (GreaterThan (CMP x y))
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if x.Op != OpARM64MOVBload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpGreater64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater64F x y)
+       // match: (MOVBreg x:(MOVBreg _))
        // cond:
-       // result: (GreaterThan (FCMPD x y))
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if x.Op != OpARM64MOVBreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpGreater64U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater64U x y)
+       // match: (MOVBreg  (MOVDconst [c]))
        // cond:
-       // result: (GreaterThanU (CMP x y))
+       // result: (MOVDconst [int64(int8(c))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(int8(c))
                return true
        }
+       return false
 }
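+
+// Worked example (sketch): the signed variant folds with sign extension
+// instead: (MOVBreg (MOVDconst [255])) -> (MOVDconst [-1]), because
+// truncating c == 255 to int8 gives -1.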
-func rewriteValueARM64_OpGreater8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVBstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater8 x y)
+       // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+       // result: (MOVBstore [off1+off2] {sym} ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpGreater8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater8U x y)
-       // cond:
-       // result: (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpHmul16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul16 x y)
+       // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
        // cond:
-       // result: (SRAconst (MULW <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRAconst)
-               v.AuxInt = 16
-               v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt32())
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVBreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpHmul16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul16u x y)
+       // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
        // cond:
-       // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRLconst)
-               v.AuxInt = 16
-               v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt32())
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVBUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpHmul32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul32 x y)
+       // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
        // cond:
-       // result: (SRAconst (MULL <config.fe.TypeInt64()> x y) [32])
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRAconst)
-               v.AuxInt = 32
-               v0 := b.NewValue0(v.Line, OpARM64MULL, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVHreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpHmul32u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul32u x y)
+       // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
        // cond:
-       // result: (SRAconst (UMULL <config.fe.TypeUInt64()> x y) [32])
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRAconst)
-               v.AuxInt = 32
-               v0 := b.NewValue0(v.Line, OpARM64UMULL, config.fe.TypeUInt64())
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVHUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpHmul64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul64 x y)
+       // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
        // cond:
-       // result: (MULH x y)
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64MULH)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVWreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
                v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpHmul64u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul64u x y)
+       // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
        // cond:
-       // result: (UMULH x y)
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UMULH)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVWUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
                v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
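+
+// The MOV*reg cases above all encode one fact: a byte store writes only
+// the low 8 bits of its value, so any sign or zero extension feeding it
+// is dead and the store can use the unextended value directly.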
-func rewriteValueARM64_OpHmul8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul8 x y)
+       // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (SRAconst (MULW <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+       // result: (MOVDload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRAconst)
-               v.AuxInt = 8
-               v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt16())
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpHmul8u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul8u x y)
-       // cond:
-       // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+       // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRLconst)
-               v.AuxInt = 8
-               v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt16())
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpInterCall(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVDreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (InterCall [argwid] entry mem)
+       // match: (MOVDreg x)
+       // cond: x.Uses == 1
+       // result: (MOVDnop x)
+       for {
+               x := v.Args[0]
+               if !(x.Uses == 1) {
+                       break
+               }
+               v.reset(OpARM64MOVDnop)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVDreg  (MOVDconst [c]))
        // cond:
-       // result: (CALLinter [argwid] entry mem)
+       // result: (MOVDconst [c])
        for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64CALLinter)
-               v.AuxInt = argwid
-               v.AddArg(entry)
-               v.AddArg(mem)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = c
                return true
        }
+       return false
 }
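+
+// When its argument has no other uses, MOVDreg degrades to MOVDnop, a
+// pseudo-op that keeps input and output in the same register and emits
+// no instruction; the constant case simply propagates c.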
-func rewriteValueARM64_OpIsInBounds(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (IsInBounds idx len)
+       // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (LessThanU (CMP idx len))
+       // result: (MOVDstore [off1+off2] {sym} ptr val mem)
        for {
-               idx := v.Args[0]
-               len := v.Args[1]
-               v.reset(OpARM64LessThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(idx)
-               v0.AddArg(len)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpIsNonNil(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (IsNonNil ptr)
+       // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (NotEqual (CMPconst [0] ptr))
+       // result: (MOVHUload [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVHUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVHUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
+               off := v.AuxInt
+               sym := v.Aux
                ptr := v.Args[0]
-               v.reset(OpARM64NotEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v0.AuxInt = 0
-               v0.AddArg(ptr)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVHstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpIsSliceInBounds(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVHUreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (IsSliceInBounds idx len)
+       // match: (MOVHUreg x:(MOVBUload _ _))
        // cond:
-       // result: (LessEqualU (CMP idx len))
+       // result: (MOVDreg x)
        for {
-               idx := v.Args[0]
-               len := v.Args[1]
-               v.reset(OpARM64LessEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(idx)
-               v0.AddArg(len)
-               v.AddArg(v0)
+               x := v.Args[0]
+               if x.Op != OpARM64MOVBUload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLeq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq16 x y)
+       // match: (MOVHUreg x:(MOVHUload _ _))
        // cond:
-       // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x.Op != OpARM64MOVHUload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLeq16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq16U x y)
+       // match: (MOVHUreg x:(MOVBUreg _))
        // cond:
-       // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x.Op != OpARM64MOVBUreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLeq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq32 x y)
+       // match: (MOVHUreg x:(MOVHUreg _))
        // cond:
-       // result: (LessEqual (CMPW x y))
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if x.Op != OpARM64MOVHUreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLeq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq32F x y)
+       // match: (MOVHUreg (MOVDconst [c]))
        // cond:
-       // result: (GreaterEqual (FCMPS y x))
+       // result: (MOVDconst [int64(uint16(c))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(uint16(c))
                return true
        }
+       return false
 }
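+
+// Note that MOVHUreg also matches the narrower x:(MOVBUload _ _) and
+// x:(MOVBUreg _): a value already zero-extended from 8 bits is a
+// fortiori zero-extended from 16, so the wider extension is redundant.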
-func rewriteValueARM64_OpLeq32U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq32U x y)
+       // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (LessEqualU (CMPW x y))
+       // result: (MOVHload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVHload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpLeq64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq64 x y)
-       // cond:
-       // result: (LessEqual (CMP x y))
+       // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVHload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpLeq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq64F x y)
-       // cond:
-       // result: (GreaterEqual (FCMPD y x))
+       // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVHstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
+       return false
 }
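
The first two MOVHload arms fold a constant pointer adjustment (ADDconst) into the load's AuxInt offset and merge a MOVDaddr symbol via mergeSym; the third forwards a just-stored value into the load. A sketch of the address algebra that makes offset folding sound, using slice indexing as a stand-in for pointer arithmetic (buf, off1, off2 are illustrative names):

package main

import "fmt"

func main() {
	buf := []byte{10, 20, 30, 40, 50, 60}
	off1, off2 := 3, 2
	// load [off1] from (ptr + off2)  ==  load [off1+off2] from ptr
	fmt.Println(buf[off2:][off1] == buf[off1+off2]) // true
}
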
-func rewriteValueARM64_OpLeq64U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVHreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq64U x y)
+       // match: (MOVHreg x:(MOVBload _ _))
        // cond:
-       // result: (LessEqualU (CMP x y))
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if x.Op != OpARM64MOVBload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLeq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq8 x y)
+       // match: (MOVHreg x:(MOVBUload _ _))
        // cond:
-       // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x.Op != OpARM64MOVBUload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLeq8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq8U x y)
+       // match: (MOVHreg x:(MOVHload _ _))
        // cond:
-       // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x.Op != OpARM64MOVHload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLess16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less16 x y)
+       // match: (MOVHreg x:(MOVBreg _))
        // cond:
-       // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x.Op != OpARM64MOVBreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLess16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less16U x y)
+       // match: (MOVHreg x:(MOVBUreg _))
        // cond:
-       // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x.Op != OpARM64MOVBUreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLess32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less32 x y)
+       // match: (MOVHreg x:(MOVHreg _))
        // cond:
-       // result: (LessThan (CMPW x y))
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if x.Op != OpARM64MOVHreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLess32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less32F x y)
+       // match: (MOVHreg  (MOVDconst [c]))
        // cond:
-       // result: (GreaterThan (FCMPS y x))
+       // result: (MOVDconst [int64(int16(c))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(int16(c))
                return true
        }
+       return false
 }
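
MOVHreg mirrors MOVHUreg for the signed case: extensions subsumed by a narrower signed producer collapse to MOVDreg, and a constant folds through int64(int16(c)). A small sketch of that fold, again plain Go rather than compiler code:

package main

import "fmt"

func main() {
	c := int64(0x18000)          // low 16 bits are 0x8000
	fmt.Println(int64(int16(c))) // -32768: bit 15 is replicated upward
}
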
-func rewriteValueARM64_OpLess32U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less32U x y)
+       // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (LessThanU (CMPW x y))
+       // result: (MOVHstore [off1+off2] {sym} ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValueARM64_OpLess64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less64 x y)
-       // cond:
-       // result: (LessThan (CMP x y))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValueARM64_OpLess64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less64F x y)
-       // cond:
-       // result: (GreaterThan (FCMPD y x))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValueARM64_OpLess64U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less64U x y)
-       // cond:
-       // result: (LessThanU (CMP x y))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValueARM64_OpLess8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less8 x y)
-       // cond:
-       // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValueARM64_OpLess8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less8U x y)
-       // cond:
-       // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64LessThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValueARM64_OpLoad(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Load <t> ptr mem)
-       // cond: t.IsBoolean()
-       // result: (MOVBUload ptr mem)
-       for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(t.IsBoolean()) {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
                        break
                }
-               v.reset(OpARM64MOVBUload)
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is8BitInt(t) && isSigned(t))
-       // result: (MOVBload ptr mem)
+       // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is8BitInt(t) && isSigned(t)) {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
                        break
                }
-               v.reset(OpARM64MOVBload)
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is8BitInt(t) && !isSigned(t))
-       // result: (MOVBUload ptr mem)
+       // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+       // cond:
+       // result: (MOVHstore [off] {sym} ptr x mem)
        for {
-               t := v.Type
+               off := v.AuxInt
+               sym := v.Aux
                ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is8BitInt(t) && !isSigned(t)) {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVHreg {
                        break
                }
-               v.reset(OpARM64MOVBUload)
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = off
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(x)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is16BitInt(t) && isSigned(t))
-       // result: (MOVHload ptr mem)
+       // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+       // cond:
+       // result: (MOVHstore [off] {sym} ptr x mem)
        for {
-               t := v.Type
+               off := v.AuxInt
+               sym := v.Aux
                ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is16BitInt(t) && isSigned(t)) {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVHUreg {
                        break
                }
-               v.reset(OpARM64MOVHload)
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = off
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(x)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is16BitInt(t) && !isSigned(t))
-       // result: (MOVHUload ptr mem)
+       // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+       // cond:
+       // result: (MOVHstore [off] {sym} ptr x mem)
        for {
-               t := v.Type
+               off := v.AuxInt
+               sym := v.Aux
                ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is16BitInt(t) && !isSigned(t)) {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVWreg {
                        break
                }
-               v.reset(OpARM64MOVHUload)
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = off
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(x)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is32BitInt(t) && isSigned(t))
-       // result: (MOVWload ptr mem)
+       // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+       // cond:
+       // result: (MOVHstore [off] {sym} ptr x mem)
        for {
-               t := v.Type
+               off := v.AuxInt
+               sym := v.Aux
                ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitInt(t) && isSigned(t)) {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVWUreg {
                        break
                }
-               v.reset(OpARM64MOVWload)
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = off
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(x)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is32BitInt(t) && !isSigned(t))
-       // result: (MOVWUload ptr mem)
+       return false
+}
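
The MOVHstore arms after the two addressing rewrites drop a MOV{H,HU,W,WU}reg feeding the stored value: a halfword store writes only the low 16 bits, so sign- or zero-extending those bits first cannot change what lands in memory. A sketch of that invariant (x is an arbitrary illustrative value):

package main

import "fmt"

func main() {
	x := int64(0x1abcd)
	direct := uint16(x)                     // store low 16 bits of x
	viaSignExt := uint16(int32(int16(x)))   // MOVHreg x, then store
	viaZeroExt := uint16(uint32(uint16(x))) // MOVHUreg x, then store
	fmt.Println(direct == viaSignExt, direct == viaZeroExt) // true true
}
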
+func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // cond:
+       // result: (MOVWUload [off1+off2] {sym} ptr mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitInt(t) && !isSigned(t)) {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
                        break
                }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
                v.reset(OpARM64MOVWUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is64BitInt(t) || isPtr(t))
-       // result: (MOVDload ptr mem)
+       // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
                mem := v.Args[1]
-               if !(is64BitInt(t) || isPtr(t)) {
+               if !(canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpARM64MOVDload)
+               v.reset(OpARM64MOVWUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is32BitFloat(t)
-       // result: (FMOVSload ptr mem)
+       // match: (MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitFloat(t)) {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVWstore {
                        break
                }
-               v.reset(OpARM64FMOVSload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Load <t> ptr mem)
-       // cond: is64BitFloat(t)
-       // result: (FMOVDload ptr mem)
-       for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is64BitFloat(t)) {
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
                        break
                }
-               v.reset(OpARM64FMOVDload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
        return false
 }
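
The last MOVWUload arm above is store-to-load forwarding: when the load's memory argument is a MOVWstore of the same width to a provably identical address (isSamePtr, matching sym and off), the load must observe exactly the stored value, so it is replaced by a copy of x. The shape being recognized, written as ordinary Go:

package main

import "fmt"

func main() {
	var cell uint32
	x := uint32(0xdeadbeef)
	cell = x            // MOVWstore [off] {sym} ptr x mem
	y := cell           // MOVWUload [off] {sym} ptr mem' -> rewritten to x
	fmt.Println(y == x) // true
}
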
-func rewriteValueARM64_OpLrot16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVWUreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lrot16 <t> x [c])
+       // match: (MOVWUreg x:(MOVBUload _ _))
        // cond:
-       // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> (ZeroExt16to64 x) [16-c&15]))
+       // result: (MOVDreg x)
        for {
-               t := v.Type
-               c := v.AuxInt
                x := v.Args[0]
-               v.reset(OpARM64OR)
-               v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
-               v0.AuxInt = c & 15
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
-               v1.AuxInt = 16 - c&15
-               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v2.AddArg(x)
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               if x.Op != OpARM64MOVBUload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLrot32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lrot32 x [c])
+       // match: (MOVWUreg x:(MOVHUload _ _))
        // cond:
-       // result: (RORWconst x [32-c&31])
+       // result: (MOVDreg x)
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               v.reset(OpARM64RORWconst)
-               v.AuxInt = 32 - c&31
+               if x.Op != OpARM64MOVHUload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
                v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLrot64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lrot64 x [c])
+       // match: (MOVWUreg x:(MOVWUload _ _))
        // cond:
-       // result: (RORconst  x [64-c&63])
+       // result: (MOVDreg x)
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               v.reset(OpARM64RORconst)
-               v.AuxInt = 64 - c&63
+               if x.Op != OpARM64MOVWUload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
                v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLrot8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lrot8  <t> x [c])
+       // match: (MOVWUreg x:(MOVBUreg _))
        // cond:
-       // result: (OR (SLLconst <t> x [c&7])  (SRLconst <t> (ZeroExt8to64  x) [8-c&7]))
+       // result: (MOVDreg x)
        for {
-               t := v.Type
-               c := v.AuxInt
                x := v.Args[0]
-               v.reset(OpARM64OR)
-               v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
-               v0.AuxInt = c & 7
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
-               v1.AuxInt = 8 - c&7
-               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v2.AddArg(x)
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               if x.Op != OpARM64MOVBUreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLsh16x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh16x16 <t> x y)
+       // match: (MOVWUreg x:(MOVHUreg _))
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       // result: (MOVDreg x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               if x.Op != OpARM64MOVHUreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLsh16x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh16x32 <t> x y)
+       // match: (MOVWUreg x:(MOVWUreg _))
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       // result: (MOVDreg x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               if x.Op != OpARM64MOVWUreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWUreg (MOVDconst [c]))
+       // cond:
+       // result: (MOVDconst [int64(uint32(c))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(uint32(c))
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpLsh16x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x64  x (MOVDconst [c]))
-       // cond: uint64(c) < 16
-       // result: (SLLconst x [c])
+       // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // cond:
+       // result: (MOVWload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
                        break
                }
-               c := v_1.AuxInt
-               if !(uint64(c) < 16) {
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
                        break
                }
-               v.reset(OpARM64SLLconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (Lsh16x64  _ (MOVDconst [c]))
-       // cond: uint64(c) >= 16
-       // result: (MOVDconst [0])
+       // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
+               if v_1.Op != OpARM64MOVWstore {
                        break
                }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
                        break
                }
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = 0
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (Lsh16x64 <t> x y)
+       return false
+}
+func rewriteValueARM64_OpARM64MOVWreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWreg x:(MOVBload _ _))
        // cond:
-       // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+       // result: (MOVDreg x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpConst64, t)
-               v1.AuxInt = 0
-               v.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v2.AuxInt = 64
-               v2.AddArg(y)
-               v.AddArg(v2)
+               if x.Op != OpARM64MOVBload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLsh16x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh16x8  <t> x y)
+       // match: (MOVWreg x:(MOVBUload _ _))
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       // result: (MOVDreg x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               if x.Op != OpARM64MOVBUload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLsh32x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x16 <t> x y)
+       // match: (MOVWreg x:(MOVHload _ _))
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       // result: (MOVDreg x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               if x.Op != OpARM64MOVHload {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLsh32x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x32 <t> x y)
+       // match: (MOVWreg x:(MOVHUload _ _))
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
-       for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
-               return true
-       }
-}
-func rewriteValueARM64_OpLsh32x64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x64  x (MOVDconst [c]))
-       // cond: uint64(c) < 32
-       // result: (SLLconst x [c])
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 32) {
+               if x.Op != OpARM64MOVHUload {
                        break
                }
-               v.reset(OpARM64SLLconst)
-               v.AuxInt = c
+               v.reset(OpARM64MOVDreg)
                v.AddArg(x)
                return true
        }
-       // match: (Lsh32x64  _ (MOVDconst [c]))
-       // cond: uint64(c) >= 32
-       // result: (MOVDconst [0])
+       // match: (MOVWreg x:(MOVWload _ _))
+       // cond:
+       // result: (MOVDreg x)
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
+               x := v.Args[0]
+               if x.Op != OpARM64MOVWload {
                        break
                }
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = 0
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-       // match: (Lsh32x64 <t> x y)
+       // match: (MOVWreg x:(MOVBreg _))
        // cond:
-       // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+       // result: (MOVDreg x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpConst64, t)
-               v1.AuxInt = 0
-               v.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v2.AuxInt = 64
-               v2.AddArg(y)
-               v.AddArg(v2)
+               if x.Op != OpARM64MOVBreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLsh32x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x8  <t> x y)
+       // match: (MOVWreg x:(MOVBUreg _))
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       // result: (MOVDreg x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               if x.Op != OpARM64MOVBUreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLsh64x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh64x16 <t> x y)
+       // match: (MOVWreg x:(MOVHreg _))
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       // result: (MOVDreg x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               if x.Op != OpARM64MOVHreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLsh64x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh64x32 <t> x y)
+       // match: (MOVWreg x:(MOVHUreg _))
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       // result: (MOVDreg x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               if x.Op != OpARM64MOVHUreg {
+                       break
+               }
+               v.reset(OpARM64MOVDreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLsh64x64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh64x64  x (MOVDconst [c]))
-       // cond: uint64(c) < 64
-       // result: (SLLconst x [c])
+       // match: (MOVWreg x:(MOVWreg _))
+       // cond:
+       // result: (MOVDreg x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 64) {
+               if x.Op != OpARM64MOVWreg {
                        break
                }
-               v.reset(OpARM64SLLconst)
-               v.AuxInt = c
+               v.reset(OpARM64MOVDreg)
                v.AddArg(x)
                return true
        }
-       // match: (Lsh64x64  _ (MOVDconst [c]))
-       // cond: uint64(c) >= 64
-       // result: (MOVDconst [0])
+       // match: (MOVWreg  (MOVDconst [c]))
+       // cond:
+       // result: (MOVDconst [int64(int32(c))])
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 64) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
+               c := v_0.AuxInt
                v.reset(OpARM64MOVDconst)
-               v.AuxInt = 0
-               return true
-       }
-       // match: (Lsh64x64 <t> x y)
-       // cond:
-       // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
-       for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpConst64, t)
-               v1.AuxInt = 0
-               v.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v2.AuxInt = 64
-               v2.AddArg(y)
-               v.AddArg(v2)
+               v.AuxInt = int64(int32(c))
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpLsh64x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh64x8  <t> x y)
+       // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       // result: (MOVWstore [off1+off2] {sym} ptr val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpLsh8x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh8x16 <t> x y)
+       // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       // result: (MOVWstore [off] {sym} ptr x mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVWreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVWstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpLsh8x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh8x32 <t> x y)
+       // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       // result: (MOVWstore [off] {sym} ptr x mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVWUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVWstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
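
The MUL rules that follow are constant strength reduction: multiplication by -1 becomes NEG, by 0 a constant zero, by 1 a copy, and by a power of two a left shift (SLLconst [log2(c)]), with each pattern duplicated for the constant in either argument position. A sketch of the identities being relied on, with x and k as illustrative values:

package main

import "fmt"

func main() {
	x := int64(37)
	k := uint(3)             // c = 1<<k = 8, so log2(c) = 3
	fmt.Println(x*-1 == -x)  // (MUL x (MOVDconst [-1])) -> (NEG x)
	fmt.Println(x*0 == 0)    // (MUL _ (MOVDconst [0]))  -> (MOVDconst [0])
	fmt.Println(x*1 == x)    // (MUL x (MOVDconst [1]))  -> x
	fmt.Println(x*8 == x<<k) // (MUL x (MOVDconst [c]))  -> (SLLconst [log2(c)] x)
}
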
-func rewriteValueARM64_OpLsh8x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MUL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x64   x (MOVDconst [c]))
-       // cond: uint64(c) < 8
-       // result: (SLLconst x [c])
+       // match: (MUL x (MOVDconst [-1]))
+       // cond:
+       // result: (NEG x)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               c := v_1.AuxInt
-               if !(uint64(c) < 8) {
+               if v_1.AuxInt != -1 {
                        break
                }
-               v.reset(OpARM64SLLconst)
-               v.AuxInt = c
+               v.reset(OpARM64NEG)
                v.AddArg(x)
                return true
        }
-       // match: (Lsh8x64   _ (MOVDconst [c]))
-       // cond: uint64(c) >= 8
+       // match: (MUL _ (MOVDconst [0]))
+       // cond:
        // result: (MOVDconst [0])
        for {
                v_1 := v.Args[1]
                if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
+               if v_1.AuxInt != 0 {
                        break
                }
                v.reset(OpARM64MOVDconst)
                v.AuxInt = 0
                return true
        }
-       // match: (Lsh8x64 <t> x y)
+       // match: (MUL x (MOVDconst [1]))
        // cond:
-       // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+       // result: x
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpConst64, t)
-               v1.AuxInt = 0
-               v.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v2.AuxInt = 64
-               v2.AddArg(y)
-               v.AddArg(v2)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               if v_1.AuxInt != 1 {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpLsh8x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh8x8  <t> x y)
-       // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       // match: (MUL x (MOVDconst [c]))
+       // cond: isPowerOfTwo(c)
+       // result: (SLLconst [log2(c)] x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARM64SLLconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpMod16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod16 x y)
+       // match: (MUL (MOVDconst [-1]) x)
        // cond:
-       // result: (MODW (SignExt16to32 x) (SignExt16to32 y))
+       // result: (NEG x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64MODW)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               if v_0.AuxInt != -1 {
+                       break
+               }
+               x := v.Args[1]
+               v.reset(OpARM64NEG)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpMod16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod16u x y)
+       // match: (MUL (MOVDconst [0]) _)
        // cond:
-       // result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UMODW)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               if v_0.AuxInt != 0 {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpMod32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod32 x y)
+       // match: (MUL (MOVDconst [1]) x)
        // cond:
-       // result: (MODW x y)
+       // result: x
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64MODW)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               if v_0.AuxInt != 1 {
+                       break
+               }
+               x := v.Args[1]
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM64_OpMod32u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod32u x y)
-       // cond:
-       // result: (UMODW x y)
+       // match: (MUL (MOVDconst [c]) x)
+       // cond: isPowerOfTwo(c)
+       // result: (SLLconst [log2(c)] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UMODW)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARM64SLLconst)
+               v.AuxInt = log2(c)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM64_OpMod64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod64 x y)
+       // match: (MUL   (MOVDconst [c]) (MOVDconst [d]))
        // cond:
-       // result: (MOD x y)
+       // result: (MOVDconst [c*d])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64MOD)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = c * d
                return true
        }
+       return false
 }
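
The MUL rules above encode standard integer identities: x*(-1) is -x, x*0 is 0, x*1 is x, x*2^k is x<<k, and a product of two constants folds outright. A minimal standalone sketch of the power-of-two case, with illustrative stand-ins for the isPowerOfTwo and log2 helpers (not the compiler's own definitions):

    package main

    import "fmt"

    // Illustrative stand-ins for the predicates the rules use.
    func isPowerOfTwo(c int64) bool { return c > 0 && c&(c-1) == 0 }

    func log2(c int64) int64 {
        n := int64(-1)
        for ; c > 0; c >>= 1 {
            n++
        }
        return n
    }

    func main() {
        x, c := int64(12345), int64(8)
        if isPowerOfTwo(c) {
            // (MUL x (MOVDconst [8])) rewrites to (SLLconst [3] x).
            fmt.Println(x*c == x<<uint64(log2(c))) // true
        }
    }
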
-func rewriteValueARM64_OpMod64u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MULW(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod64u x y)
-       // cond:
-       // result: (UMOD x y)
+       // match: (MULW x (MOVDconst [c]))
+       // cond: int32(c)==-1
+       // result: (NEG x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UMOD)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(int32(c) == -1) {
+                       break
+               }
+               v.reset(OpARM64NEG)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM64_OpMod8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod8 x y)
-       // cond:
-       // result: (MODW (SignExt8to32 x) (SignExt8to32 y))
+       // match: (MULW _ (MOVDconst [c]))
+       // cond: int32(c)==0
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64MODW)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(int32(c) == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpMod8u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod8u x y)
-       // cond:
-       // result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
+       // match: (MULW x (MOVDconst [c]))
+       // cond: int32(c)==1
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UMODW)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(int32(c) == 1) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpMove(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Move [s] _ _ mem)
-       // cond: SizeAndAlign(s).Size() == 0
-       // result: mem
+       // match: (MULW x (MOVDconst [c]))
+       // cond: isPowerOfTwo(c)
+       // result: (SLLconst [log2(c)] x)
        for {
-               s := v.AuxInt
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 0) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = mem.Type
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARM64SLLconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 1
-       // result: (MOVBstore dst (MOVBUload src mem) mem)
+       // match: (MULW (MOVDconst [c]) x)
+       // cond: int32(c)==-1
+       // result: (NEG x)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 1) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVBstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(int32(c) == -1) {
+                       break
+               }
+               v.reset(OpARM64NEG)
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore dst (MOVHUload src mem) mem)
+       // match: (MULW (MOVDconst [c]) _)
+       // cond: int32(c)==0
+       // result: (MOVDconst [0])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVHstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               if !(int32(c) == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 2
-       // result: (MOVBstore [1] dst (MOVBUload [1] src mem)           (MOVBstore dst (MOVBUload src mem) mem))
+       // match: (MULW (MOVDconst [c]) x)
+       // cond: int32(c)==1
+       // result: x
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 2) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVBstore)
-               v.AuxInt = 1
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-               v0.AuxInt = 1
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(int32(c) == 1) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-       // result: (MOVWstore dst (MOVWUload src mem) mem)
+       // match: (MULW (MOVDconst [c]) x)
+       // cond: isPowerOfTwo(c)
+       // result: (SLLconst [log2(c)] x)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVWstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARM64SLLconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore [2] dst (MOVHUload [2] src mem)           (MOVHstore dst (MOVHUload src mem) mem))
+       // match: (MULW  (MOVDconst [c]) (MOVDconst [d]))
+       // cond:
+       // result: (MOVDconst [int64(int32(c)*int32(d))])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVHstore)
-               v.AuxInt = 2
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-               v0.AuxInt = 2
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(int32(c) * int32(d))
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4
-       // result: (MOVBstore [3] dst (MOVBUload [3] src mem)           (MOVBstore [2] dst (MOVBUload [2] src mem)                      (MOVBstore [1] dst (MOVBUload [1] src mem)                              (MOVBstore dst (MOVBUload src mem) mem))))
+       return false
+}
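
MULW mirrors the MUL rules but is only defined on the low 32 bits, so every condition is phrased as int32(c)==... and the constant fold truncates both operands to int32 before widening again. A quick check of why the truncation matters, assuming plain Go wraparound arithmetic models the 32-bit product:

    package main

    import "fmt"

    func main() {
        c, d := int64(1<<20), int64(1<<20)
        fmt.Println(int64(int32(c)*int32(d))) // 0: the 32-bit product wraps
        fmt.Println(c * d)                    // 1099511627776: the 64-bit product
    }
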
+func rewriteValueARM64_OpARM64MVN(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVN (MOVDconst [c]))
+       // cond:
+       // result: (MOVDconst [^c])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 4) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVBstore)
-               v.AuxInt = 3
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-               v0.AuxInt = 3
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
-               v1.AuxInt = 2
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-               v2.AuxInt = 2
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
-               v3.AuxInt = 1
-               v3.AddArg(dst)
-               v4 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-               v4.AuxInt = 1
-               v4.AddArg(src)
-               v4.AddArg(mem)
-               v3.AddArg(v4)
-               v5 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
-               v5.AddArg(dst)
-               v6 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-               v6.AddArg(src)
-               v6.AddArg(mem)
-               v5.AddArg(v6)
-               v5.AddArg(mem)
-               v3.AddArg(v5)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               c := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = ^c
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
-       // result: (MOVDstore dst (MOVDload src mem) mem)
+       return false
+}
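
MVN of a constant folds to its bitwise complement, and NEG (next function) folds to its negation; Go's unary ^ and - compute exactly the AuxInt the rewrites store. For instance:

    package main

    import "fmt"

    func main() {
        fmt.Println(^int64(0))            // -1: (MVN (MOVDconst [0])) -> (MOVDconst [-1])
        fmt.Printf("%#x\n", ^int64(0xff)) // -0x100
        fmt.Println(-int64(7))            // -7: (NEG (MOVDconst [7])) -> (MOVDconst [-7])
    }
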
+func rewriteValueARM64_OpARM64NEG(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NEG (MOVDconst [c]))
+       // cond:
+       // result: (MOVDconst [-c])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVDstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = -c
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
-       // result: (MOVWstore [4] dst (MOVWUload [4] src mem)           (MOVWstore dst (MOVWUload src mem) mem))
+       return false
+}
+func rewriteValueARM64_OpARM64NotEqual(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NotEqual (FlagEQ))
+       // cond:
+       // result: (MOVDconst [0])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagEQ {
                        break
                }
-               v.reset(OpARM64MOVWstore)
-               v.AuxInt = 4
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
-               v0.AuxInt = 4
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore [6] dst (MOVHUload [6] src mem)           (MOVHstore [4] dst (MOVHUload [4] src mem)                      (MOVHstore [2] dst (MOVHUload [2] src mem)                              (MOVHstore dst (MOVHUload src mem) mem))))
+       // match: (NotEqual (FlagLT_ULT))
+       // cond:
+       // result: (MOVDconst [1])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_ULT {
                        break
                }
-               v.reset(OpARM64MOVHstore)
-               v.AuxInt = 6
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-               v0.AuxInt = 6
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-               v1.AuxInt = 4
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-               v2.AuxInt = 4
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-               v3.AuxInt = 2
-               v3.AddArg(dst)
-               v4 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-               v4.AuxInt = 2
-               v4.AddArg(src)
-               v4.AddArg(mem)
-               v3.AddArg(v4)
-               v5 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-               v5.AddArg(dst)
-               v6 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-               v6.AddArg(src)
-               v6.AddArg(mem)
-               v5.AddArg(v6)
-               v5.AddArg(mem)
-               v3.AddArg(v5)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 3
-       // result: (MOVBstore [2] dst (MOVBUload [2] src mem)           (MOVBstore [1] dst (MOVBUload [1] src mem)                      (MOVBstore dst (MOVBUload src mem) mem)))
+       // match: (NotEqual (FlagLT_UGT))
+       // cond:
+       // result: (MOVDconst [1])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 3) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagLT_UGT {
                        break
                }
-               v.reset(OpARM64MOVBstore)
-               v.AuxInt = 2
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-               v0.AuxInt = 2
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
-               v1.AuxInt = 1
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-               v2.AuxInt = 1
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
-               v3.AddArg(dst)
-               v4 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-               v4.AddArg(src)
-               v4.AddArg(mem)
-               v3.AddArg(v4)
-               v3.AddArg(mem)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore [4] dst (MOVHUload [4] src mem)           (MOVHstore [2] dst (MOVHUload [2] src mem)                      (MOVHstore dst (MOVHUload src mem) mem)))
+       // match: (NotEqual (FlagGT_ULT))
+       // cond:
+       // result: (MOVDconst [1])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_ULT {
                        break
                }
-               v.reset(OpARM64MOVHstore)
-               v.AuxInt = 4
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-               v0.AuxInt = 4
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-               v1.AuxInt = 2
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-               v2.AuxInt = 2
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-               v3.AddArg(dst)
-               v4 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-               v4.AddArg(src)
-               v4.AddArg(mem)
-               v3.AddArg(v4)
-               v3.AddArg(mem)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0
-       // result: (MOVWstore [8] dst (MOVWUload [8] src mem)           (MOVWstore [4] dst (MOVWUload [4] src mem)                      (MOVWstore dst (MOVWUload src mem) mem)))
+       // match: (NotEqual (FlagGT_UGT))
+       // cond:
+       // result: (MOVDconst [1])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64FlagGT_UGT {
                        break
                }
-               v.reset(OpARM64MOVWstore)
-               v.AuxInt = 8
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
-               v0.AuxInt = 8
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
-               v1.AuxInt = 4
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
-               v2.AuxInt = 4
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
-               v3.AddArg(dst)
-               v4 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
-               v4.AddArg(src)
-               v4.AddArg(mem)
-               v3.AddArg(v4)
-               v3.AddArg(mem)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
-       // result: (MOVDstore [8] dst (MOVDload [8] src mem)            (MOVDstore dst (MOVDload src mem) mem))
+       // match: (NotEqual (InvertFlags x))
+       // cond:
+       // result: (NotEqual x)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64InvertFlags {
                        break
                }
-               v.reset(OpARM64MOVDstore)
-               v.AuxInt = 8
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-               v0.AuxInt = 8
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               x := v_0.Args[0]
+               v.reset(OpARM64NotEqual)
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
-       // result: (MOVDstore [16] dst (MOVDload [16] src mem)          (MOVDstore [8] dst (MOVDload [8] src mem)                       (MOVDstore dst (MOVDload src mem) mem)))
+       return false
+}
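
The five flag constants (FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT, FlagGT_UGT) stand for comparison outcomes already known at compile time, so NotEqual folds to 0 only for FlagEQ and to 1 otherwise; InvertFlags swaps the comparison's operands, which leaves inequality unchanged. A hypothetical plain-Go model of that truth table (names invented for the sketch, not the SSA types):

    package main

    import "fmt"

    type flag int

    const (
        flagEQ     flag = iota // operands equal
        flagLT_ULT             // signed <, unsigned <
        flagLT_UGT             // signed <, unsigned >
        flagGT_ULT             // signed >, unsigned <
        flagGT_UGT             // signed >, unsigned >
    )

    // notEqual mirrors the rewrites: only equality yields 0.
    func notEqual(f flag) int64 {
        if f == flagEQ {
            return 0
        }
        return 1
    }

    func main() {
        fmt.Println(notEqual(flagEQ), notEqual(flagGT_UGT)) // 0 1
    }
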
+func rewriteValueARM64_OpARM64OR(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OR  (MOVDconst [c]) x)
+       // cond:
+       // result: (ORconst  [c] x)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVDstore)
-               v.AuxInt = 16
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-               v0.AuxInt = 16
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-               v1.AuxInt = 8
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-               v2.AuxInt = 8
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-               v3.AddArg(dst)
-               v4 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-               v4.AddArg(src)
-               v4.AddArg(mem)
-               v3.AddArg(v4)
-               v3.AddArg(mem)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARM64ORconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() > 24 || SizeAndAlign(s).Align()%8 != 0
-       // result: (LoweredMove [SizeAndAlign(s).Align()]               dst             src             (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])            mem)
+       // match: (OR  x (MOVDconst [c]))
+       // cond:
+       // result: (ORconst  [c] x)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() > 24 || SizeAndAlign(s).Align()%8 != 0) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64LoweredMove)
-               v.AuxInt = SizeAndAlign(s).Align()
-               v.AddArg(dst)
-               v.AddArg(src)
-               v0 := b.NewValue0(v.Line, OpARM64ADDconst, src.Type)
-               v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-               v0.AddArg(src)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpARM64ORconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       return false
-}
-func rewriteValueARM64_OpMul16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mul16 x y)
+       // match: (OR  x x)
        // cond:
-       // result: (MULW x y)
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64MULW)
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
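
OR is commutative, so a constant operand on either side is canonicalized into an ORconst, and x|x collapses to x. The idempotence identity, checked directly:

    package main

    import "fmt"

    func main() {
        x := int64(0b1010)
        fmt.Println(x|x == x) // true: (OR x x) rewrites to x
    }
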
-func rewriteValueARM64_OpMul32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ORconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul32 x y)
+       // match: (ORconst  [0]  x)
        // cond:
-       // result: (MULW x y)
+       // result: x
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64MULW)
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM64_OpMul32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mul32F x y)
+       // match: (ORconst  [-1] _)
        // cond:
-       // result: (FMULS x y)
+       // result: (MOVDconst [-1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64FMULS)
-               v.AddArg(x)
-               v.AddArg(y)
+               if v.AuxInt != -1 {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = -1
                return true
        }
-}
-func rewriteValueARM64_OpMul64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mul64 x y)
+       // match: (ORconst  [c] (MOVDconst [d]))
        // cond:
-       // result: (MUL x y)
+       // result: (MOVDconst [c|d])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64MUL)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = c | d
                return true
        }
-}
-func rewriteValueARM64_OpMul64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mul64F x y)
+       // match: (ORconst  [c] (ORconst [d] x))
        // cond:
-       // result: (FMULD x y)
+       // result: (ORconst [c|d] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64FMULD)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ORconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARM64ORconst)
+               v.AuxInt = c | d
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
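
The ORconst rules are the algebra of OR with a known mask: 0 is the identity, -1 (all ones) absorbs everything, two constants fold, and nested ORconsts merge because OR is associative and commutative. Verifying those identities in plain Go:

    package main

    import "fmt"

    func main() {
        x, c, d := int64(0x0f0f), int64(0xf0), int64(0x0f)
        fmt.Println(x|0 == x)           // ORconst [0] is a no-op
        fmt.Println(x|-1 == -1)         // ORconst [-1] yields all ones
        fmt.Println((x|d)|c == x|(c|d)) // nested ORconsts collapse to one
    }
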
-func rewriteValueARM64_OpMul8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64SLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul8 x y)
+       // match: (SLL x (MOVDconst [c]))
        // cond:
-       // result: (MULW x y)
+       // result: (SLLconst x [c&63])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64MULW)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARM64SLLconst)
+               v.AuxInt = c & 63
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
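
The variable-shift rules mask the constant with c&63 because ARM64 register-controlled shifts of a 64-bit value use the count modulo 64, so the masked immediate is equivalent. For example:

    package main

    import "fmt"

    func main() {
        x, c := int64(1), int64(67)
        fmt.Println(x << uint64(c&63)) // 8: a count of 67 behaves like 3
    }
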
-func rewriteValueARM64_OpNeg16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64SLLconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg16 x)
+       // match: (SLLconst [c] (MOVDconst [d]))
        // cond:
-       // result: (NEG x)
+       // result: (MOVDconst [int64(d)<<uint64(c)])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64NEG)
-               v.AddArg(x)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(d) << uint64(c)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpNeg32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64SRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg32 x)
+       // match: (SRA x (MOVDconst [c]))
        // cond:
-       // result: (NEG x)
+       // result: (SRAconst x [c&63])
        for {
                x := v.Args[0]
-               v.reset(OpARM64NEG)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = c & 63
                v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpNeg32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64SRAconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg32F x)
+       // match: (SRAconst [c] (MOVDconst [d]))
        // cond:
-       // result: (FNEGS x)
+       // result: (MOVDconst [int64(d)>>uint64(c)])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64FNEGS)
-               v.AddArg(x)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(d) >> uint64(c)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpNeg64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64SRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg64 x)
+       // match: (SRL x (MOVDconst [c]))
        // cond:
-       // result: (NEG x)
+       // result: (SRLconst x [c&63])
        for {
                x := v.Args[0]
-               v.reset(OpARM64NEG)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARM64SRLconst)
+               v.AuxInt = c & 63
                v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpNeg64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64SRLconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg64F x)
+       // match: (SRLconst [c] (MOVDconst [d]))
        // cond:
-       // result: (FNEGD x)
+       // result: (MOVDconst [int64(uint64(d)>>uint64(c))])
        for {
-               x := v.Args[0]
-               v.reset(OpARM64FNEGD)
-               v.AddArg(x)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(uint64(d) >> uint64(c))
                return true
        }
+       return false
 }
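
SRA and SRL fold the same way but differ in sign handling: SRAconst folds with a signed int64 shift, which propagates the sign bit, while SRLconst converts through uint64 so zeros shift in. The difference, side by side:

    package main

    import "fmt"

    func main() {
        d, c := int64(-8), uint64(1)
        fmt.Println(d >> c)                // -4: arithmetic shift keeps the sign
        fmt.Println(int64(uint64(d) >> c)) // 9223372036854775804: logical shift zero-fills
    }
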
-func rewriteValueARM64_OpNeg8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64SUB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg8 x)
+       // match: (SUB x (MOVDconst [c]))
        // cond:
-       // result: (NEG x)
+       // result: (SUBconst [c] x)
        for {
                x := v.Args[0]
-               v.reset(OpARM64NEG)
-               v.AddArg(x)
-               return true
-       }
-}
-func rewriteValueARM64_OpNeq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq16 x y)
-       // cond:
-       // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64NotEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARM64SUBconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpNeq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq32 x y)
+       // match: (SUB x x)
        // cond:
-       // result: (NotEqual (CMPW x y))
+       // result: (MOVDconst [0])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64NotEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpNeq32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64SUBconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq32F x y)
+       // match: (SUBconst [0]  x)
        // cond:
-       // result: (NotEqual (FCMPS x y))
+       // result: x
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64NotEqual)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpNeq64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq64 x y)
+       // match: (SUBconst [c] (MOVDconst [d]))
        // cond:
-       // result: (NotEqual (CMP x y))
+       // result: (MOVDconst [d-c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64NotEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = d - c
                return true
        }
-}
-func rewriteValueARM64_OpNeq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq64F x y)
+       // match: (SUBconst [c] (SUBconst [d] x))
        // cond:
-       // result: (NotEqual (FCMPD x y))
+       // result: (ADDconst [-c-d] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64NotEqual)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64SUBconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARM64ADDconst)
+               v.AuxInt = -c - d
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpNeq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq8 x y)
+       // match: (SUBconst [c] (ADDconst [d] x))
        // cond:
-       // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (ADDconst [-c+d] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64NotEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARM64ADDconst)
+               v.AuxInt = -c + d
+               v.AddArg(x)
                return true
        }
+       return false
 }
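
Chained SUBconst/ADDconst values collapse into a single ADDconst because x-d-c = x+(-c-d) and (x+d)-c = x+(-c+d). A direct check of both identities:

    package main

    import "fmt"

    func main() {
        x, c, d := int64(100), int64(7), int64(3)
        fmt.Println((x-d)-c == x+(-c-d)) // true: SUBconst of SUBconst
        fmt.Println((x+d)-c == x+(-c+d)) // true: SUBconst of ADDconst
    }
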
-func rewriteValueARM64_OpNeqB(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64UDIV(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NeqB x y)
+       // match: (UDIV x (MOVDconst [1]))
        // cond:
-       // result: (XOR x y)
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64XOR)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               if v_1.AuxInt != 1 {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM64_OpNeqPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (NeqPtr x y)
-       // cond:
-       // result: (NotEqual (CMP x y))
+       // match: (UDIV x (MOVDconst [c]))
+       // cond: isPowerOfTwo(c)
+       // result: (SRLconst [log2(c)] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64NotEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARM64SRLconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpNilCheck(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (NilCheck ptr mem)
+       // match: (UDIV  (MOVDconst [c]) (MOVDconst [d]))
        // cond:
-       // result: (LoweredNilCheck ptr mem)
+       // result: (MOVDconst [int64(uint64(c)/uint64(d))])
        for {
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64LoweredNilCheck)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(uint64(c) / uint64(d))
                return true
        }
+       return false
 }
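
Unsigned division by a power of two is a logical right shift, which is why UDIV (and UDIVW below, with the extra is32Bit guard) strength-reduces to SRLconst. The rules are unsigned-only; the same shift would round the wrong way for negative signed dividends. Checking the identity:

    package main

    import "fmt"

    func main() {
        x, c := uint64(1000), uint64(8)
        fmt.Println(x/c == x>>3) // true: UDIV by 8 is SRLconst [3]
    }
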
-func rewriteValueARM64_OpNot(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64UDIVW(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Not x)
-       // cond:
-       // result: (XOR (MOVDconst [1]) x)
+       // match: (UDIVW x (MOVDconst [c]))
+       // cond: uint32(c)==1
+       // result: x
        for {
                x := v.Args[0]
-               v.reset(OpARM64XOR)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 1
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint32(c) == 1) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpOffPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (OffPtr [off] ptr:(SP))
-       // cond:
-       // result: (MOVDaddr [off] ptr)
+       // match: (UDIVW x (MOVDconst [c]))
+       // cond: isPowerOfTwo(c) && is32Bit(c)
+       // result: (SRLconst [log2(c)] x)
        for {
-               off := v.AuxInt
-               ptr := v.Args[0]
-               if ptr.Op != OpSP {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVDaddr)
-               v.AuxInt = off
-               v.AddArg(ptr)
+               c := v_1.AuxInt
+               if !(isPowerOfTwo(c) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARM64SRLconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
                return true
        }
-       // match: (OffPtr [off] ptr)
+       // match: (UDIVW (MOVDconst [c]) (MOVDconst [d]))
        // cond:
-       // result: (ADDconst [off] ptr)
+       // result: (MOVDconst [int64(uint32(c)/uint32(d))])
        for {
-               off := v.AuxInt
-               ptr := v.Args[0]
-               v.reset(OpARM64ADDconst)
-               v.AuxInt = off
-               v.AddArg(ptr)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(uint32(c) / uint32(d))
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpOr16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64UMOD(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or16 x y)
+       // match: (UMOD _ (MOVDconst [1]))
        // cond:
-       // result: (OR x y)
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64OR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               if v_1.AuxInt != 1 {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM64_OpOr32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Or32 x y)
-       // cond:
-       // result: (OR x y)
+       // match: (UMOD x (MOVDconst [c]))
+       // cond: isPowerOfTwo(c)
+       // result: (ANDconst [c-1] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64OR)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARM64ANDconst)
+               v.AuxInt = c - 1
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM64_OpOr64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Or64 x y)
+       // match: (UMOD  (MOVDconst [c]) (MOVDconst [d]))
        // cond:
-       // result: (OR x y)
+       // result: (MOVDconst [int64(uint64(c)%uint64(d))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64OR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(uint64(c) % uint64(d))
                return true
        }
+       return false
 }
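
Likewise, an unsigned remainder by a power of two keeps only the low log2(c) bits, so UMOD (and UMODW below) becomes an ANDconst with c-1:

    package main

    import "fmt"

    func main() {
        x, c := uint64(1000), uint64(8)
        fmt.Println(x%c == x&(c-1)) // true: UMOD by 8 is ANDconst [7]
    }
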
-func rewriteValueARM64_OpOr8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64UMODW(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or8 x y)
-       // cond:
-       // result: (OR x y)
+       // match: (UMODW _ (MOVDconst [c]))
+       // cond: uint32(c)==1
+       // result: (MOVDconst [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint32(c) == 1) {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (UMODW x (MOVDconst [c]))
+       // cond: isPowerOfTwo(c) && is32Bit(c)
+       // result: (ANDconst [c-1] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64OR)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(isPowerOfTwo(c) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARM64ANDconst)
+               v.AuxInt = c - 1
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       // match: (UMODW (MOVDconst [c]) (MOVDconst [d]))
+       // cond:
+       // result: (MOVDconst [int64(uint32(c)%uint32(d))])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = int64(uint32(c) % uint32(d))
+               return true
+       }
+       return false
 }
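
UMODW is the 32-bit counterpart: values are truncated to uint32 before
folding, and the power-of-two rule adds an is32Bit(c) guard so the c-1
mask stays a legal immediate. A sketch of the constant fold, mirroring
the rule rather than the generated code:

    // (UMODW (MOVDconst [c]) (MOVDconst [d])) folds as:
    func umodwFold(c, d int64) int64 {
        return int64(uint32(c) % uint32(d)) // low 32 bits only; assumes uint32(d) != 0
    }
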
-func rewriteValueARM64_OpOrB(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64XOR(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (OrB x y)
+       // match: (XOR (MOVDconst [c]) x)
        // cond:
-       // result: (OR x y)
+       // result: (XORconst [c] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64OR)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARM64XORconst)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM64_OpRsh16Ux16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh16Ux16 <t> x y)
+       // match: (XOR x (MOVDconst [c]))
        // cond:
-       // result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       // result: (XORconst [c] x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpConst64, t)
-               v3.AuxInt = 0
-               v.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v.AddArg(v4)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARM64XORconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM64_OpRsh16Ux32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh16Ux32 <t> x y)
+       // match: (XOR x x)
        // cond:
-       // result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       // result: (MOVDconst [0])
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpConst64, t)
-               v3.AuxInt = 0
-               v.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v.AddArg(v4)
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
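
The XOR rules push a constant operand into XORconst from either side,
which is sound because xor is commutative, and cancel x^x to zero. The
cancellation in one line of Go (a sketch, not generated code):

    func xorSelf(x uint64) uint64 { return x ^ x } // always 0, hence (XOR x x) -> (MOVDconst [0])
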
-func rewriteValueARM64_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64XORconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux64 x (MOVDconst [c]))
-       // cond: uint64(c) < 16
-       // result: (SRLconst (ZeroExt16to64 x) [c])
+       // match: (XORconst [0] x)
+       // cond:
+       // result: x
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 16) {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpARM64SRLconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (Rsh16Ux64 _ (MOVDconst [c]))
-       // cond: uint64(c) >= 16
-       // result: (MOVDconst [0])
+       // match: (XORconst [-1] x)
+       // cond:
+       // result: (MVN x)
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
+               if v.AuxInt != -1 {
                        break
                }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
+               x := v.Args[0]
+               v.reset(OpARM64MVN)
+               v.AddArg(x)
+               return true
+       }
+       // match: (XORconst [c] (MOVDconst [d]))
+       // cond:
+       // result: (MOVDconst [c^d])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDconst {
                        break
                }
+               d := v_0.AuxInt
                v.reset(OpARM64MOVDconst)
-               v.AuxInt = 0
+               v.AuxInt = c ^ d
                return true
        }
-       // match: (Rsh16Ux64 <t> x y)
+       // match: (XORconst [c] (XORconst [d] x))
        // cond:
-       // result: (CSELULT (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+       // result: (XORconst [c^d] x)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v3.AddArg(y)
-               v.AddArg(v3)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64XORconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARM64XORconst)
+               v.AuxInt = c ^ d
+               v.AddArg(x)
                return true
        }
+       return false
 }
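
XORconst then simplifies further: xor with 0 is the identity, xor with -1
(all ones) is bitwise NOT and becomes MVN, a constant operand folds, and
two stacked XORconsts merge because xor is associative. A sketch of the
last rule (assumed identity, not generated code):

    // c ^ (d ^ x) == (c ^ d) ^ x, so (XORconst [c] (XORconst [d] x))
    // collapses into a single (XORconst [c^d] x).
    func xorChain(c, d, x uint64) uint64 { return (c ^ d) ^ x }
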
-func rewriteValueARM64_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAdd16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux8  <t> x y)
+       // match: (Add16 x y)
        // cond:
-       // result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       // result: (ADD x y)
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpConst64, t)
-               v3.AuxInt = 0
-               v.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v.AddArg(v4)
+               v.reset(OpARM64ADD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAdd32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x16 x y)
+       // match: (Add32 x y)
        // cond:
-       // result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+       // result: (ADD x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v3.AuxInt = 63
-               v1.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v1.AddArg(v4)
-               v.AddArg(v1)
+               v.reset(OpARM64ADD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAdd32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x32 x y)
+       // match: (Add32F x y)
        // cond:
-       // result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+       // result: (FADDS x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v3.AuxInt = 63
-               v1.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v1.AddArg(v4)
-               v.AddArg(v1)
+               v.reset(OpARM64FADDS)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAdd64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x64  x (MOVDconst [c]))
-       // cond: uint64(c) < 16
-       // result: (SRAconst (SignExt16to64 x) [c])
+       // match: (Add64 x y)
+       // cond:
+       // result: (ADD x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 16) {
-                       break
-               }
-               v.reset(OpARM64SRAconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
+               y := v.Args[1]
+               v.reset(OpARM64ADD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Rsh16x64 x (MOVDconst [c]))
-       // cond: uint64(c) >= 16
-       // result: (SRAconst (SignExt16to64 x) [63])
+}
+func rewriteValueARM64_OpAdd64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add64F x y)
+       // cond:
+       // result: (FADDD x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
-                       break
-               }
-               v.reset(OpARM64SRAconst)
-               v.AuxInt = 63
-               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
+               y := v.Args[1]
+               v.reset(OpARM64FADDD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Rsh16x64 x y)
+}
+func rewriteValueARM64_OpAdd8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add8 x y)
        // cond:
-       // result: (SRA (SignExt16to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+       // result: (ADD x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v1.AddArg(y)
-               v2 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v2.AuxInt = 63
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v3.AddArg(y)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               v.reset(OpARM64ADD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAddPtr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x8  x y)
+       // match: (AddPtr x y)
        // cond:
-       // result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))
+       // result: (ADD x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v3.AuxInt = 63
-               v1.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v1.AddArg(v4)
-               v.AddArg(v1)
+               v.reset(OpARM64ADD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
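
Every integer Add, from Add8 through Add64 and AddPtr, lowers to the same
64-bit ADD: a narrow addition only needs its low bits to be correct, and
those are unaffected by whatever the high bits hold. Add32F and Add64F go
to the FADDS/FADDD float instructions instead. A one-line sketch of the
low-bits argument (not part of the generated file):

    // The low 8 bits of a sum depend only on the low 8 bits of the operands.
    func add8(x, y uint64) uint8 { return uint8(x + y) } // == uint8(x) + uint8(y)
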
-func rewriteValueARM64_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAddr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux16 <t> x y)
+       // match: (Addr {sym} base)
        // cond:
-       // result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       // result: (MOVDaddr {sym} base)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpConst64, t)
-               v3.AuxInt = 0
-               v.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v.AddArg(v4)
+               sym := v.Aux
+               base := v.Args[0]
+               v.reset(OpARM64MOVDaddr)
+               v.Aux = sym
+               v.AddArg(base)
                return true
        }
 }
-func rewriteValueARM64_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAnd16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux32 <t> x y)
+       // match: (And16 x y)
        // cond:
-       // result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       // result: (AND x y)
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpConst64, t)
-               v3.AuxInt = 0
-               v.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v.AddArg(v4)
+               v.reset(OpARM64AND)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpRsh32Ux64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAnd32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux64 x (MOVDconst [c]))
-       // cond: uint64(c) < 32
-       // result: (SRLconst (ZeroExt32to64 x) [c])
+       // match: (And32 x y)
+       // cond:
+       // result: (AND x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 32) {
-                       break
-               }
-               v.reset(OpARM64SRLconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               return true
-       }
-       // match: (Rsh32Ux64 _ (MOVDconst [c]))
-       // cond: uint64(c) >= 32
-       // result: (MOVDconst [0])
-       for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
-                       break
-               }
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = 0
+               y := v.Args[1]
+               v.reset(OpARM64AND)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Rsh32Ux64 <t> x y)
+}
+func rewriteValueARM64_OpAnd64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (And64 x y)
        // cond:
-       // result: (CSELULT (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+       // result: (AND x y)
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v3.AddArg(y)
-               v.AddArg(v3)
+               v.reset(OpARM64AND)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAnd8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux8  <t> x y)
+       // match: (And8 x y)
        // cond:
-       // result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       // result: (AND x y)
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpConst64, t)
-               v3.AuxInt = 0
-               v.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v.AddArg(v4)
+               v.reset(OpARM64AND)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAndB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x16 x y)
+       // match: (AndB x y)
        // cond:
-       // result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+       // result: (AND x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v3.AuxInt = 63
-               v1.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v1.AddArg(v4)
-               v.AddArg(v1)
+               v.reset(OpARM64AND)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAvg64u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x32 x y)
+       // match: (Avg64u <t> x y)
        // cond:
-       // result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+       // result: (ADD (ADD <t> (SRLconst <t> x [1]) (SRLconst <t> y [1])) (AND <t> (AND <t> x y) (MOVDconst [1])))
        for {
+               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v.reset(OpARM64ADD)
+               v0 := b.NewValue0(v.Line, OpARM64ADD, t)
+               v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+               v1.AuxInt = 1
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+               v2.AuxInt = 1
                v2.AddArg(y)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v3.AuxInt = 63
-               v1.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v1.AddArg(v4)
-               v.AddArg(v1)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpARM64AND, t)
+               v4 := b.NewValue0(v.Line, OpARM64AND, t)
+               v4.AddArg(x)
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v5.AuxInt = 1
+               v3.AddArg(v5)
+               v.AddArg(v3)
                return true
        }
 }
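
The Avg64u expansion above computes the unsigned average floor((x+y)/2)
without risking overflow in the intermediate sum. Writing x = 2a+p and
y = 2b+q with p,q in {0,1}, floor((x+y)/2) = a + b + (p AND q), which is
exactly the generated shape. As a Go sketch (not generated code):

    func avg64u(x, y uint64) uint64 {
        return (x >> 1) + (y >> 1) + (x & y & 1)
    }

For example, avg64u(^uint64(0), ^uint64(0)) returns ^uint64(0), whereas a
naive (x+y)/2 would wrap.
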
-func rewriteValueARM64_OpRsh32x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpClosureCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x64  x (MOVDconst [c]))
-       // cond: uint64(c) < 32
-       // result: (SRAconst (SignExt32to64 x) [c])
+       // match: (ClosureCall [argwid] entry closure mem)
+       // cond:
+       // result: (CALLclosure [argwid] entry closure mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 32) {
-                       break
-               }
-               v.reset(OpARM64SRAconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
+               argwid := v.AuxInt
+               entry := v.Args[0]
+               closure := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64CALLclosure)
+               v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(closure)
+               v.AddArg(mem)
                return true
        }
-       // match: (Rsh32x64 x (MOVDconst [c]))
-       // cond: uint64(c) >= 32
-       // result: (SRAconst (SignExt32to64 x) [63])
+}
+func rewriteValueARM64_OpCom16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Com16 x)
+       // cond:
+       // result: (MVN x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
-                       break
-               }
-               v.reset(OpARM64SRAconst)
-               v.AuxInt = 63
-               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(OpARM64MVN)
+               v.AddArg(x)
                return true
        }
-       // match: (Rsh32x64 x y)
+}
+func rewriteValueARM64_OpCom32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Com32 x)
        // cond:
-       // result: (SRA (SignExt32to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+       // result: (MVN x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v1.AddArg(y)
-               v2 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v2.AuxInt = 63
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v3.AddArg(y)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               v.reset(OpARM64MVN)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCom64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x8  x y)
+       // match: (Com64 x)
        // cond:
-       // result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))
+       // result: (MVN x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v3.AuxInt = 63
-               v1.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v1.AddArg(v4)
-               v.AddArg(v1)
+               v.reset(OpARM64MVN)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpRsh64Ux16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCom8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64Ux16 <t> x y)
+       // match: (Com8 x)
        // cond:
-       // result: (CSELULT (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       // result: (MVN x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               v.reset(OpARM64MVN)
+               v.AddArg(x)
                return true
        }
 }
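
All four Com (bitwise complement) widths lower to MVN for the same reason
the integer adds share one instruction: the high bits of ^x are ignored at
narrower widths. Sketch:

    func com16(x uint16) uint16 { return ^x } // lowered as a full-width MVN
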
-func rewriteValueARM64_OpRsh64Ux32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConst16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64Ux32 <t> x y)
+       // match: (Const16 [val])
        // cond:
-       // result: (CSELULT (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       // result: (MOVDconst [val])
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               val := v.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM64_OpRsh64Ux64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConst32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64Ux64 x (MOVDconst [c]))
-       // cond: uint64(c) < 64
-       // result: (SRLconst x [c])
+       // match: (Const32 [val])
+       // cond:
+       // result: (MOVDconst [val])
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 64) {
-                       break
-               }
-               v.reset(OpARM64SRLconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               val := v.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = val
                return true
        }
-       // match: (Rsh64Ux64 _ (MOVDconst [c]))
-       // cond: uint64(c) >= 64
-       // result: (MOVDconst [0])
+}
+func rewriteValueARM64_OpConst32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Const32F [val])
+       // cond:
+       // result: (FMOVSconst [val])
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 64) {
-                       break
-               }
+               val := v.AuxInt
+               v.reset(OpARM64FMOVSconst)
+               v.AuxInt = val
+               return true
+       }
+}
+func rewriteValueARM64_OpConst64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Const64 [val])
+       // cond:
+       // result: (MOVDconst [val])
+       for {
+               val := v.AuxInt
                v.reset(OpARM64MOVDconst)
-               v.AuxInt = 0
+               v.AuxInt = val
                return true
        }
-       // match: (Rsh64Ux64 <t> x y)
+}
+func rewriteValueARM64_OpConst64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Const64F [val])
        // cond:
-       // result: (CSELULT (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+       // result: (FMOVDconst [val])
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpConst64, t)
-               v1.AuxInt = 0
-               v.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v2.AuxInt = 64
-               v2.AddArg(y)
-               v.AddArg(v2)
+               val := v.AuxInt
+               v.reset(OpARM64FMOVDconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM64_OpRsh64Ux8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConst8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64Ux8  <t> x y)
+       // match: (Const8 [val])
        // cond:
-       // result: (CSELULT (SRL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       // result: (MOVDconst [val])
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               val := v.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM64_OpRsh64x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConstBool(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64x16 x y)
+       // match: (ConstBool [b])
        // cond:
-       // result: (SRA x (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+       // result: (MOVDconst [b])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v2.AuxInt = 63
-               v0.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v0.AddArg(v3)
-               v.AddArg(v0)
+               b := v.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = b
                return true
        }
 }
-func rewriteValueARM64_OpRsh64x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConstNil(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64x32 x y)
+       // match: (ConstNil)
        // cond:
-       // result: (SRA x (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+       // result: (MOVDconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v2.AuxInt = 63
-               v0.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v0.AddArg(v3)
-               v.AddArg(v0)
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
 }
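
Constant materialization is uniform: every integer width, ConstBool (0 or
1), and ConstNil (a zero pointer) becomes MOVDconst, while Const32F and
Const64F become FMOVSconst/FMOVDconst because floats live in FP registers.
A hypothetical helper summarizing the mapping (illustrative only; the real
compiler dispatches on op codes, not strings):

    func constLowering(op string) string {
        switch op {
        case "Const32F":
            return "FMOVSconst"
        case "Const64F":
            return "FMOVDconst"
        default: // Const8..Const64, ConstBool, ConstNil
            return "MOVDconst"
        }
    }
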
-func rewriteValueARM64_OpRsh64x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConvert(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64x64  x (MOVDconst [c]))
-       // cond: uint64(c) < 64
-       // result: (SRAconst x [c])
+       // match: (Convert x mem)
+       // cond:
+       // result: (MOVDconvert x mem)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 64) {
-                       break
-               }
-               v.reset(OpARM64SRAconst)
-               v.AuxInt = c
+               mem := v.Args[1]
+               v.reset(OpARM64MOVDconvert)
                v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-       // match: (Rsh64x64 x (MOVDconst [c]))
-       // cond: uint64(c) >= 64
-       // result: (SRAconst x [63])
+}
+func rewriteValueARM64_OpCvt32Fto32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32Fto32 x)
+       // cond:
+       // result: (FCVTZSSW x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 64) {
-                       break
-               }
-               v.reset(OpARM64SRAconst)
-               v.AuxInt = 63
+               v.reset(OpARM64FCVTZSSW)
                v.AddArg(x)
                return true
        }
-       // match: (Rsh64x64 x y)
+}
+func rewriteValueARM64_OpCvt32Fto32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32Fto32U x)
        // cond:
-       // result: (SRA x (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+       // result: (FCVTZUSW x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRA)
+               v.reset(OpARM64FCVTZUSW)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v1.AuxInt = 63
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v2.AuxInt = 64
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpRsh64x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt32Fto64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64x8  x y)
+       // match: (Cvt32Fto64 x)
        // cond:
-       // result: (SRA x (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))
+       // result: (FCVTZSS x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRA)
+               v.reset(OpARM64FCVTZSS)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v2.AuxInt = 63
-               v0.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v0.AddArg(v3)
-               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt32Fto64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux16 <t> x y)
+       // match: (Cvt32Fto64F x)
        // cond:
-       // result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       // result: (FCVTSD x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpConst64, t)
-               v3.AuxInt = 0
-               v.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v.AddArg(v4)
+               v.reset(OpARM64FCVTSD)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt32Uto32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux32 <t> x y)
+       // match: (Cvt32Uto32F x)
        // cond:
-       // result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       // result: (UCVTFWS x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpConst64, t)
-               v3.AuxInt = 0
-               v.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v.AddArg(v4)
+               v.reset(OpARM64UCVTFWS)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt32Uto64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux64  x (MOVDconst [c]))
-       // cond: uint64(c) < 8
-       // result: (SRLconst (ZeroExt8to64  x) [c])
+       // match: (Cvt32Uto64F x)
+       // cond:
+       // result: (UCVTFWD x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 8) {
-                       break
-               }
-               v.reset(OpARM64SRLconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(OpARM64UCVTFWD)
+               v.AddArg(x)
                return true
        }
-       // match: (Rsh8Ux64  _ (MOVDconst [c]))
-       // cond: uint64(c) >= 8
-       // result: (MOVDconst [0])
+}
+func rewriteValueARM64_OpCvt32to32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32to32F x)
+       // cond:
+       // result: (SCVTFWS x)
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
-                       break
-               }
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               v.reset(OpARM64SCVTFWS)
+               v.AddArg(x)
                return true
        }
-       // match: (Rsh8Ux64 <t> x y)
+}
+func rewriteValueARM64_OpCvt32to64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32to64F x)
        // cond:
-       // result: (CSELULT (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+       // result: (SCVTFWD x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v3.AddArg(y)
-               v.AddArg(v3)
+               v.reset(OpARM64SCVTFWD)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt64Fto32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux8  <t> x y)
+       // match: (Cvt64Fto32 x)
        // cond:
-       // result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       // result: (FCVTZSDW x)
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpConst64, t)
-               v3.AuxInt = 0
-               v.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v.AddArg(v4)
+               v.reset(OpARM64FCVTZSDW)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt64Fto32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x16 x y)
+       // match: (Cvt64Fto32F x)
        // cond:
-       // result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+       // result: (FCVTDS x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v3.AuxInt = 63
-               v1.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v1.AddArg(v4)
-               v.AddArg(v1)
+               v.reset(OpARM64FCVTDS)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt64Fto32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x32 x y)
+       // match: (Cvt64Fto32U x)
        // cond:
-       // result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+       // result: (FCVTZUDW x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v3.AuxInt = 63
-               v1.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v1.AddArg(v4)
-               v.AddArg(v1)
+               v.reset(OpARM64FCVTZUDW)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt64Fto64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x64   x (MOVDconst [c]))
-       // cond: uint64(c) < 8
-       // result: (SRAconst (SignExt8to64  x) [c])
+       // match: (Cvt64Fto64 x)
+       // cond:
+       // result: (FCVTZSD x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 8) {
-                       break
-               }
-               v.reset(OpARM64SRAconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(OpARM64FCVTZSD)
+               v.AddArg(x)
                return true
        }
-       // match: (Rsh8x64  x (MOVDconst [c]))
-       // cond: uint64(c) >= 8
-       // result: (SRAconst (SignExt8to64  x) [63])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
-                       break
-               }
-               v.reset(OpARM64SRAconst)
-               v.AuxInt = 63
-               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
+}
+func rewriteValueARM64_OpCvt64to32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt64to32F x)
+       // cond:
+       // result: (SCVTFS x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64SCVTFS)
+               v.AddArg(x)
                return true
        }
-       // match: (Rsh8x64 x y)
+}
+func rewriteValueARM64_OpCvt64to64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt64to64F x)
        // cond:
-       // result: (SRA (SignExt8to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+       // result: (SCVTFD x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64SCVTFD)
+               v.AddArg(x)
+               return true
+       }
+}
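
Taken together, the Cvt rules above map Go's numeric conversions one-for-one onto ARM64 convert instructions: FCVTZ* rounds toward zero for float-to-integer, matching Go's truncating conversions, and SCVTF* handles signed integer-to-float. A minimal sketch of Go source that exercises each rule (package and function names here are illustrative, not part of the commit):

package lower

// Each conversion corresponds to one rule above: float64 to uint32
// (FCVTZUDW), float64 to int64 (FCVTZSD), int64 to float32 (SCVTFS),
// and int64 to float64 (SCVTFD).
func cvt(f float64, i int64) (uint32, int64, float32, float64) {
	return uint32(f), int64(f), float32(i), float64(i)
}
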
+func rewriteValueARM64_OpDeferCall(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (DeferCall [argwid] mem)
+       // cond:
+       // result: (CALLdefer [argwid] mem)
+       for {
+               argwid := v.AuxInt
+               mem := v.Args[0]
+               v.reset(OpARM64CALLdefer)
+               v.AuxInt = argwid
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueARM64_OpDiv16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div16 x y)
+       // cond:
+       // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v.reset(OpARM64DIVW)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
                v0.AddArg(x)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
                v1.AddArg(y)
-               v2 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v2.AuxInt = 63
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v3.AddArg(y)
-               v1.AddArg(v3)
                v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM64_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv16u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x8  x y)
+       // match: (Div16u x y)
        // cond:
-       // result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))
+       // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRA)
-               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v.reset(OpARM64UDIVW)
+               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v0.AddArg(x)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v2.AddArg(y)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-               v3.AuxInt = 63
-               v1.AddArg(v3)
-               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v4.AuxInt = 64
-               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v5.AddArg(y)
-               v4.AddArg(v5)
-               v1.AddArg(v4)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
                v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM64_OpSignExt16to32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SignExt16to32 x)
+       // match: (Div32 x y)
        // cond:
-       // result: (MOVHreg x)
+       // result: (DIVW x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64MOVHreg)
+               y := v.Args[1]
+               v.reset(OpARM64DIVW)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpSignExt16to64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SignExt16to64 x)
+       // match: (Div32F x y)
        // cond:
-       // result: (MOVHreg x)
+       // result: (FDIVS x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64MOVHreg)
+               y := v.Args[1]
+               v.reset(OpARM64FDIVS)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpSignExt32to64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv32u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SignExt32to64 x)
+       // match: (Div32u x y)
        // cond:
-       // result: (MOVWreg x)
+       // result: (UDIVW x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64MOVWreg)
+               y := v.Args[1]
+               v.reset(OpARM64UDIVW)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpSignExt8to16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SignExt8to16 x)
+       // match: (Div64 x y)
        // cond:
-       // result: (MOVBreg x)
+       // result: (DIV x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64MOVBreg)
+               y := v.Args[1]
+               v.reset(OpARM64DIV)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpSignExt8to32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SignExt8to32 x)
+       // match: (Div64F x y)
        // cond:
-       // result: (MOVBreg x)
+       // result: (FDIVD x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64MOVBreg)
+               y := v.Args[1]
+               v.reset(OpARM64FDIVD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpSignExt8to64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv64u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SignExt8to64 x)
+       // match: (Div64u x y)
        // cond:
-       // result: (MOVBreg x)
+       // result: (UDIV x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64MOVBreg)
+               y := v.Args[1]
+               v.reset(OpARM64UDIV)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpSqrt(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sqrt x)
+       // match: (Div8 x y)
        // cond:
-       // result: (FSQRTD x)
+       // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
        for {
                x := v.Args[0]
-               v.reset(OpARM64FSQRTD)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARM64DIVW)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM64_OpStaticCall(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv8u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (StaticCall [argwid] {target} mem)
+       // match: (Div8u x y)
        // cond:
-       // result: (CALLstatic [argwid] {target} mem)
+       // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
        for {
-               argwid := v.AuxInt
-               target := v.Aux
-               mem := v.Args[0]
-               v.reset(OpARM64CALLstatic)
-               v.AuxInt = argwid
-               v.Aux = target
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64UDIVW)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
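
AArch64 has no 8- or 16-bit divide, so the Div rules above widen first: signed divides sign-extend both operands and use DIVW (or DIV for 64-bit), while unsigned divides zero-extend and use UDIVW/UDIV. A small illustrative sketch:

package lower

// div16 lowers to DIVW on sign-extended operands; Go's truncated
// division gives div16(-100, 7) == -14.
func div16(a, b int16) int16 { return a / b }

// div16u lowers to UDIVW on zero-extended operands: div16u(100, 7) == 14.
func div16u(a, b uint16) uint16 { return a / b }
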
-func rewriteValueARM64_OpStore(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Store [1] ptr val mem)
+       // match: (Eq16 x y)
        // cond:
-       // result: (MOVBstore ptr val mem)
+       // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
-               if v.AuxInt != 1 {
-                       break
-               }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64MOVBstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64Equal)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       // match: (Store [2] ptr val mem)
+}
+func rewriteValueARM64_OpEq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq32 x y)
        // cond:
-       // result: (MOVHstore ptr val mem)
+       // result: (Equal (CMPW x y))
        for {
-               if v.AuxInt != 2 {
-                       break
-               }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64MOVHstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64Equal)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (Store [4] ptr val mem)
-       // cond: !is32BitFloat(val.Type)
-       // result: (MOVWstore ptr val mem)
+}
+func rewriteValueARM64_OpEq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq32F x y)
+       // cond:
+       // result: (Equal (FCMPS x y))
        for {
-               if v.AuxInt != 4 {
-                       break
-               }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(!is32BitFloat(val.Type)) {
-                       break
-               }
-               v.reset(OpARM64MOVWstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Store [8] ptr val mem)
-       // cond: !is64BitFloat(val.Type)
-       // result: (MOVDstore ptr val mem)
-       for {
-               if v.AuxInt != 8 {
-                       break
-               }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(!is64BitFloat(val.Type)) {
-                       break
-               }
-               v.reset(OpARM64MOVDstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Store [4] ptr val mem)
-       // cond: is32BitFloat(val.Type)
-       // result: (FMOVSstore ptr val mem)
-       for {
-               if v.AuxInt != 4 {
-                       break
-               }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32BitFloat(val.Type)) {
-                       break
-               }
-               v.reset(OpARM64FMOVSstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Store [8] ptr val mem)
-       // cond: is64BitFloat(val.Type)
-       // result: (FMOVDstore ptr val mem)
-       for {
-               if v.AuxInt != 8 {
-                       break
-               }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is64BitFloat(val.Type)) {
-                       break
-               }
-               v.reset(OpARM64FMOVDstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64Equal)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
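
The Store rules visible above pick a store width from the size carried in AuxInt (1, 2, 4, or 8 bytes become MOVBstore, MOVHstore, MOVWstore, or MOVDstore) and route 4- and 8-byte floating-point values to FMOVSstore/FMOVDstore instead, since those values live in FP registers. A sketch of two 8-byte stores that take the two different paths:

package lower

// *p is an 8-byte integer store (MOVDstore); *q is an 8-byte float
// store (FMOVDstore), because the value arrives in an FP register.
func stores(p *int64, q *float64) {
	*p = 1
	*q = 1.0
}
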
-func rewriteValueARM64_OpSub16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub16 x y)
+       // match: (Eq64 x y)
        // cond:
-       // result: (SUB x y)
+       // result: (Equal (CMP x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SUB)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64Equal)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpSub32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub32 x y)
+       // match: (Eq64F x y)
        // cond:
-       // result: (SUB x y)
+       // result: (Equal (FCMPD x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SUB)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64Equal)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
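
Float equality runs through the FP compares (FCMPS for float32, FCMPD for float64) and the same Equal pseudo-op. On AArch64 an unordered FP compare leaves the Z flag clear, so Equal comes out false whenever either operand is NaN, which matches Go's semantics for ==. An illustrative sketch:

package lower

// eq64f compiles to FCMPD feeding Equal; eq64f(math.NaN(), math.NaN())
// is false because an unordered compare never sets the Z flag.
func eq64f(a, b float64) bool { return a == b }
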
-func rewriteValueARM64_OpSub32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub32F x y)
+       // match: (Eq8 x y)
        // cond:
-       // result: (FSUBS x y)
+       // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64FSUBS)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64Equal)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
 }
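
For sub-word integer equality the rules zero-extend both operands and compare with CMPW. Zero-extension is safe even for signed types: equality is extension-agnostic as long as both sides widen the same way, so the cheaper unsigned widening suffices. Sketch:

package lower

// eq8 lowers to ZeroExt8to32 of both arguments, CMPW, then Equal;
// eq8(-1, -1) is still true since both sides widen to 0xFF the same way.
func eq8(a, b int8) bool { return a == b }
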
-func rewriteValueARM64_OpSub64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEqB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub64 x y)
+       // match: (EqB x y)
        // cond:
-       // result: (SUB x y)
+       // result: (XOR (MOVDconst [1]) (XOR <config.fe.TypeBool()> x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SUB)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64XOR)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 1
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64XOR, config.fe.TypeBool())
+               v1.AddArg(x)
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
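
EqB needs no flags at all: Go materializes booleans as 0 or 1, so x XOR y is 0 exactly when the operands are equal, and XORing with the constant 1 turns that into the desired truth value, i.e. 1 ^ (x ^ y) == 1 iff x == y. A scalar sketch of the same computation:

package lower

// eqb mirrors the EqB lowering on 0/1-valued inputs:
// eqb(1, 1) == 1 and eqb(1, 0) == 0, with no compare instruction.
func eqb(x, y uint64) uint64 { return 1 ^ (x ^ y) }
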
-func rewriteValueARM64_OpSub64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEqPtr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub64F x y)
+       // match: (EqPtr x y)
        // cond:
-       // result: (FSUBD x y)
+       // result: (Equal (CMP x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64FSUBD)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64Equal)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpSub8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub8 x y)
+       // match: (Geq16 x y)
        // cond:
-       // result: (SUB x y)
+       // result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SUB)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpSubPtr(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq16U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SubPtr x y)
+       // match: (Geq16U x y)
        // cond:
-       // result: (SUB x y)
+       // result: (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SUB)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64GreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpTrunc16to8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Trunc16to8 x)
+       // match: (Geq32 x y)
        // cond:
-       // result: x
+       // result: (GreaterEqual (CMPW x y))
        for {
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpTrunc32to16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Trunc32to16 x)
+       // match: (Geq32F x y)
        // cond:
-       // result: x
+       // result: (GreaterEqual (FCMPS x y))
        for {
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpTrunc32to8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Trunc32to8 x)
+       // match: (Geq32U x y)
        // cond:
-       // result: x
+       // result: (GreaterEqualU (CMPW x y))
        for {
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARM64GreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpTrunc64to16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Trunc64to16 x)
+       // match: (Geq64 x y)
        // cond:
-       // result: x
+       // result: (GreaterEqual (CMP x y))
        for {
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpTrunc64to32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Trunc64to32 x)
+       // match: (Geq64F x y)
        // cond:
-       // result: x
+       // result: (GreaterEqual (FCMPD x y))
        for {
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpTrunc64to8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq64U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Trunc64to8 x)
+       // match: (Geq64U x y)
        // cond:
-       // result: x
+       // result: (GreaterEqualU (CMP x y))
        for {
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARM64GreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpXor16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Xor16 x y)
+       // match: (Geq8 x y)
        // cond:
-       // result: (XOR x y)
+       // result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64XOR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpXor32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq8U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Xor32 x y)
+       // match: (Geq8U x y)
        // cond:
-       // result: (XOR x y)
+       // result: (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64XOR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64GreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
 }
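
Ordered comparisons keep the compare-then-condition shape, but unlike equality the widening must match signedness: Geq8/Geq16 sign-extend and use the signed GreaterEqual condition, while Geq8U/Geq16U zero-extend and use GreaterEqualU. The difference is observable at the byte level, as in this sketch:

package lower

// geq8(-1, 1) is false (sign-extend, signed condition); the same bit
// pattern as uint8, geq8u(0xFF, 1), is true (zero-extend, unsigned).
func geq8(a, b int8) bool   { return a >= b }
func geq8u(a, b uint8) bool { return a >= b }
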
-func rewriteValueARM64_OpXor64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGetClosurePtr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Xor64 x y)
+       // match: (GetClosurePtr)
        // cond:
-       // result: (XOR x y)
+       // result: (LoweredGetClosurePtr)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64XOR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64LoweredGetClosurePtr)
                return true
        }
 }
-func rewriteValueARM64_OpXor8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGoCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Xor8 x y)
+       // match: (GoCall [argwid] mem)
        // cond:
-       // result: (XOR x y)
+       // result: (CALLgo [argwid] mem)
+       for {
+               argwid := v.AuxInt
+               mem := v.Args[0]
+               v.reset(OpARM64CALLgo)
+               v.AuxInt = argwid
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueARM64_OpGreater16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater16 x y)
+       // cond:
+       // result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64XOR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64GreaterThan)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpZero(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater16U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Zero [s] _ mem)
-       // cond: SizeAndAlign(s).Size() == 0
-       // result: mem
+       // match: (Greater16U x y)
+       // cond:
+       // result: (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
-               s := v.AuxInt
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 0) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = mem.Type
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 1
-       // result: (MOVBstore ptr (MOVDconst [0]) mem)
+}
+func rewriteValueARM64_OpGreater32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater32 x y)
+       // cond:
+       // result: (GreaterThan (CMPW x y))
        for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 1) {
-                       break
-               }
-               v.reset(OpARM64MOVBstore)
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterThan)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AddArg(mem)
                return true
        }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore ptr (MOVDconst [0]) mem)
+}
+func rewriteValueARM64_OpGreater32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater32F x y)
+       // cond:
+       // result: (GreaterThan (FCMPS x y))
        for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
-                       break
-               }
-               v.reset(OpARM64MOVHstore)
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterThan)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AddArg(mem)
                return true
        }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 2
-       // result: (MOVBstore [1] ptr (MOVDconst [0])           (MOVBstore ptr (MOVDconst [0]) mem))
+}
+func rewriteValueARM64_OpGreater32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater32U x y)
+       // cond:
+       // result: (GreaterThanU (CMPW x y))
        for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 2) {
-                       break
-               }
-               v.reset(OpARM64MOVBstore)
-               v.AuxInt = 1
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
-               v1.AddArg(ptr)
-               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v2.AuxInt = 0
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
                return true
        }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-       // result: (MOVWstore ptr (MOVDconst [0]) mem)
+}
+func rewriteValueARM64_OpGreater64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater64 x y)
+       // cond:
+       // result: (GreaterThan (CMP x y))
        for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
-                       break
-               }
-               v.reset(OpARM64MOVWstore)
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterThan)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AddArg(mem)
                return true
        }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore [2] ptr (MOVDconst [0])           (MOVHstore ptr (MOVDconst [0]) mem))
+}
+func rewriteValueARM64_OpGreater64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater64F x y)
+       // cond:
+       // result: (GreaterThan (FCMPD x y))
        for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
-                       break
-               }
-               v.reset(OpARM64MOVHstore)
-               v.AuxInt = 2
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterThan)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-               v1.AddArg(ptr)
-               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v2.AuxInt = 0
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
                return true
        }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 4
-       // result: (MOVBstore [3] ptr (MOVDconst [0])           (MOVBstore [2] ptr (MOVDconst [0])                      (MOVBstore [1] ptr (MOVDconst [0])                              (MOVBstore ptr (MOVDconst [0]) mem))))
+}
+func rewriteValueARM64_OpGreater64U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater64U x y)
+       // cond:
+       // result: (GreaterThanU (CMP x y))
        for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 4) {
-                       break
-               }
-               v.reset(OpARM64MOVBstore)
-               v.AuxInt = 3
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
-               v1.AuxInt = 2
-               v1.AddArg(ptr)
-               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v2.AuxInt = 0
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
-               v3.AuxInt = 1
-               v3.AddArg(ptr)
-               v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v4.AuxInt = 0
-               v3.AddArg(v4)
-               v5 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
-               v5.AddArg(ptr)
-               v6 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v6.AuxInt = 0
-               v5.AddArg(v6)
-               v5.AddArg(mem)
-               v3.AddArg(v5)
-               v1.AddArg(v3)
-               v.AddArg(v1)
                return true
        }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
-       // result: (MOVDstore ptr (MOVDconst [0]) mem)
+}
+func rewriteValueARM64_OpGreater8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater8 x y)
+       // cond:
+       // result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
        for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
-                       break
-               }
-               v.reset(OpARM64MOVDstore)
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterThan)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v.AddArg(mem)
                return true
        }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
-       // result: (MOVWstore [4] ptr (MOVDconst [0])           (MOVWstore ptr (MOVDconst [0]) mem))
+}
+func rewriteValueARM64_OpGreater8U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater8U x y)
+       // cond:
+       // result: (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
-                       break
-               }
-               v.reset(OpARM64MOVWstore)
-               v.AuxInt = 4
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
-               v1.AddArg(ptr)
-               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v2.AuxInt = 0
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
                return true
        }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore [6] ptr (MOVDconst [0])           (MOVHstore [4] ptr (MOVDconst [0])                      (MOVHstore [2] ptr (MOVDconst [0])                              (MOVHstore ptr (MOVDconst [0]) mem))))
+}
+func rewriteValueARM64_OpHmul16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul16 x y)
+       // cond:
+       // result: (SRAconst (MULW <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
        for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
-                       break
-               }
-               v.reset(OpARM64MOVHstore)
-               v.AuxInt = 6
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = 16
+               v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt32())
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-               v1.AuxInt = 4
-               v1.AddArg(ptr)
-               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v2.AuxInt = 0
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-               v3.AuxInt = 2
-               v3.AddArg(ptr)
-               v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v4.AuxInt = 0
-               v3.AddArg(v4)
-               v5 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-               v5.AddArg(ptr)
-               v6 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v6.AuxInt = 0
-               v5.AddArg(v6)
-               v5.AddArg(mem)
-               v3.AddArg(v5)
-               v1.AddArg(v3)
-               v.AddArg(v1)
                return true
        }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 3
-       // result: (MOVBstore [2] ptr (MOVDconst [0])           (MOVBstore [1] ptr (MOVDconst [0])                      (MOVBstore ptr (MOVDconst [0]) mem)))
+}
+func rewriteValueARM64_OpHmul16u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul16u x y)
+       // cond:
+       // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
        for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 3) {
-                       break
-               }
-               v.reset(OpARM64MOVBstore)
-               v.AuxInt = 2
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRLconst)
+               v.AuxInt = 16
+               v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt32())
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
-               v1.AuxInt = 1
-               v1.AddArg(ptr)
-               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v2.AuxInt = 0
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
-               v3.AddArg(ptr)
-               v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v4.AuxInt = 0
-               v3.AddArg(v4)
-               v3.AddArg(mem)
-               v1.AddArg(v3)
-               v.AddArg(v1)
                return true
        }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore [4] ptr (MOVDconst [0])           (MOVHstore [2] ptr (MOVDconst [0])                      (MOVHstore ptr (MOVDconst [0]) mem)))
+}
+func rewriteValueARM64_OpHmul32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul32 x y)
+       // cond:
+       // result: (SRAconst (MULL <config.fe.TypeInt64()> x y) [32])
        for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0) {
-                       break
-               }
-               v.reset(OpARM64MOVHstore)
-               v.AuxInt = 4
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-               v1.AuxInt = 2
-               v1.AddArg(ptr)
-               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v2.AuxInt = 0
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-               v3.AddArg(ptr)
-               v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v4.AuxInt = 0
-               v3.AddArg(v4)
-               v3.AddArg(mem)
-               v1.AddArg(v3)
-               v.AddArg(v1)
-               return true
-       }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0
-       // result: (MOVWstore [8] ptr (MOVDconst [0])           (MOVWstore [4] ptr (MOVDconst [0])                      (MOVWstore ptr (MOVDconst [0]) mem)))
-       for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0) {
-                       break
-               }
-               v.reset(OpARM64MOVWstore)
-               v.AuxInt = 8
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
-               v1.AuxInt = 4
-               v1.AddArg(ptr)
-               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v2.AuxInt = 0
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
-               v3.AddArg(ptr)
-               v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v4.AuxInt = 0
-               v3.AddArg(v4)
-               v3.AddArg(mem)
-               v1.AddArg(v3)
-               v.AddArg(v1)
-               return true
-       }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
-       // result: (MOVDstore [8] ptr (MOVDconst [0])           (MOVDstore ptr (MOVDconst [0]) mem))
-       for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
-                       break
-               }
-               v.reset(OpARM64MOVDstore)
-               v.AuxInt = 8
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-               v1.AddArg(ptr)
-               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v2.AuxInt = 0
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
-               return true
-       }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
-       // result: (MOVDstore [16] ptr (MOVDconst [0])          (MOVDstore [8] ptr (MOVDconst [0])                      (MOVDstore ptr (MOVDconst [0]) mem)))
-       for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
-                       break
-               }
-               v.reset(OpARM64MOVDstore)
-               v.AuxInt = 16
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = 32
+               v0 := b.NewValue0(v.Line, OpARM64MULL, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-               v1.AuxInt = 8
-               v1.AddArg(ptr)
-               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v2.AuxInt = 0
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-               v3.AddArg(ptr)
-               v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v4.AuxInt = 0
-               v3.AddArg(v4)
-               v3.AddArg(mem)
-               v1.AddArg(v3)
-               v.AddArg(v1)
-               return true
-       }
-       // match: (Zero [s] ptr mem)
-       // cond: SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128        && SizeAndAlign(s).Align()%8 == 0 && !config.noDuffDevice
-       // result: (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/8))] ptr mem)
-       for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128 && SizeAndAlign(s).Align()%8 == 0 && !config.noDuffDevice) {
-                       break
-               }
-               v.reset(OpARM64DUFFZERO)
-               v.AuxInt = 4 * (128 - int64(SizeAndAlign(s).Size()/8))
-               v.AddArg(ptr)
-               v.AddArg(mem)
                return true
        }
-       // match: (Zero [s] ptr mem)
-       // cond: (SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0
-       // result: (LoweredZero [SizeAndAlign(s).Align()]               ptr             (ADDconst <ptr.Type> [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)] ptr)            mem)
+}
+func rewriteValueARM64_OpHmul32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul32u x y)
+       // cond:
+       // result: (SRAconst (UMULL <config.fe.TypeUInt64()> x y) [32])
        for {
-               s := v.AuxInt
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !((SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0) {
-                       break
-               }
-               v.reset(OpARM64LoweredZero)
-               v.AuxInt = SizeAndAlign(s).Align()
-               v.AddArg(ptr)
-               v0 := b.NewValue0(v.Line, OpARM64ADDconst, ptr.Type)
-               v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-               v0.AddArg(ptr)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = 32
+               v0 := b.NewValue0(v.Line, OpARM64UMULL, config.fe.TypeUInt64())
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AddArg(mem)
                return true
        }
-       return false
 }
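
The Zero rules in this hunk choose a clearing strategy by size and alignment: small aligned blocks become a few MOV*store instructions of a zero constant, mid-size blocks (more than 24 and at most 8*128 bytes, 8-byte aligned, when Duff's device is allowed) enter the unrolled zeroing routine via DUFFZERO, and everything else falls back to the LoweredZero loop. The DUFFZERO AuxInt of 4 * (128 - size/8) is an entry offset into that routine; the formula assumes 128 unrolled 8-byte stores of 4 bytes of code each, so entering 128 - size/8 stores before the end clears exactly size bytes. A sketch of declarations that would exercise each path (sizes chosen to hit the visible conditions):

package lower

type small struct{ a, b, c int64 }   // 24 bytes: three MOVDstore ops
type medium struct{ buf [64]int64 }  // 512 bytes: DUFFZERO entry
type large struct{ buf [2048]int64 } // 16 KiB: LoweredZero loop

// Assigning zero values through pointers produces an SSA Zero op of
// each size, exercising the three strategies above.
func clear3(p *small, q *medium, r *large) {
	*p = small{}
	*q = medium{}
	*r = large{}
}
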
-func rewriteValueARM64_OpZeroExt16to32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ZeroExt16to32 x)
+       // match: (Hmul64 x y)
        // cond:
-       // result: (MOVHUreg x)
+       // result: (MULH x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64MOVHUreg)
+               y := v.Args[1]
+               v.reset(OpARM64MULH)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpZeroExt16to64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul64u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ZeroExt16to64 x)
+       // match: (Hmul64u x y)
        // cond:
-       // result: (MOVHUreg x)
+       // result: (UMULH x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64MOVHUreg)
+               y := v.Args[1]
+               v.reset(OpARM64UMULH)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpZeroExt32to64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ZeroExt32to64 x)
+       // match: (Hmul8 x y)
        // cond:
-       // result: (MOVWUreg x)
+       // result: (SRAconst (MULW <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
        for {
                x := v.Args[0]
-               v.reset(OpARM64MOVWUreg)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = 8
+               v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt16())
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpZeroExt8to16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul8u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ZeroExt8to16 x)
+       // match: (Hmul8u x y)
        // cond:
-       // result: (MOVBUreg x)
+       // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
        for {
                x := v.Args[0]
-               v.reset(OpARM64MOVBUreg)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARM64SRLconst)
+               v.AuxInt = 8
+               v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt16())
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
 }
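// There is no widening multiply at 8 bits, so the Hmul8/Hmul8u rules above
// sign- or zero-extend to 32 bits, multiply in a 32-bit op (MULW/MUL), and
// shift the product right by 8. Sketch of the semantics in plain Go
// (illustration only, not part of this CL):
//
//	func hmul8(x, y int8) int8    { return int8((int32(x) * int32(y)) >> 8) }
//	func hmul8u(x, y uint8) uint8 { return uint8((uint32(x) * uint32(y)) >> 8) }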
-func rewriteValueARM64_OpZeroExt8to32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpInterCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ZeroExt8to32 x)
+       // match: (InterCall [argwid] entry mem)
        // cond:
-       // result: (MOVBUreg x)
+       // result: (CALLinter [argwid] entry mem)
        for {
-               x := v.Args[0]
-               v.reset(OpARM64MOVBUreg)
-               v.AddArg(x)
+               argwid := v.AuxInt
+               entry := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64CALLinter)
+               v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(mem)
                return true
        }
 }
-func rewriteValueARM64_OpZeroExt8to64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpIsInBounds(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ZeroExt8to64 x)
+       // match: (IsInBounds idx len)
        // cond:
-       // result: (MOVBUreg x)
+       // result: (LessThanU (CMP idx len))
        for {
-               x := v.Args[0]
-               v.reset(OpARM64MOVBUreg)
-               v.AddArg(x)
+               idx := v.Args[0]
+               len := v.Args[1]
+               v.reset(OpARM64LessThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(idx)
+               v0.AddArg(len)
+               v.AddArg(v0)
                return true
        }
 }
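// IsInBounds compiles to a single unsigned compare: a negative idx becomes
// a huge unsigned value, so one comparison checks 0 <= idx && idx < len at
// once. IsSliceInBounds below plays the same trick with <=. In plain Go
// (illustration only):
//
//	inBounds := uint64(idx) < uint64(length)       // 0 <= idx && idx < length
//	inSliceBounds := uint64(idx) <= uint64(length) // 0 <= idx && idx <= length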
-func rewriteBlockARM64(b *Block) bool {
-       switch b.Kind {
-       case BlockIf:
-               // match: (If (Equal cc) yes no)
+func rewriteValueARM64_OpIsNonNil(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (IsNonNil ptr)
+       // cond:
+       // result: (NotEqual (CMPconst [0] ptr))
+       for {
+               ptr := v.Args[0]
+               v.reset(OpARM64NotEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v0.AuxInt = 0
+               v0.AddArg(ptr)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpIsSliceInBounds(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (IsSliceInBounds idx len)
+       // cond:
+       // result: (LessEqualU (CMP idx len))
+       for {
+               idx := v.Args[0]
+               len := v.Args[1]
+               v.reset(OpARM64LessEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(idx)
+               v0.AddArg(len)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq16 x y)
+       // cond:
+       // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLeq16U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq16U x y)
+       // cond:
+       // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLeq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq32 x y)
+       // cond:
+       // result: (LessEqual (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq32F x y)
+       // cond:
+       // result: (GreaterEqual (FCMPS y x))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLeq32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq32U x y)
+       // cond:
+       // result: (LessEqualU (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLeq64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq64 x y)
+       // cond:
+       // result: (LessEqual (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq64F x y)
+       // cond:
+       // result: (GreaterEqual (FCMPD y x))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+}
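// Note that the floating-point orderings swap their operands: x <= y is
// lowered as GreaterEqual(FCMP y x) rather than LessEqual(FCMP x y). On
// ARM64 the GE/GT conditions are false for unordered (NaN) compares while
// LE/LT are true, so the swapped form gives Go's required false-on-NaN
// behavior: for example, math.NaN() <= 1.0 must evaluate to false. The
// Less32F/Less64F rules further down use the same swap with GreaterThan.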
+func rewriteValueARM64_OpLeq64U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq64U x y)
+       // cond:
+       // result: (LessEqualU (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLeq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq8 x y)
+       // cond:
+       // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLeq8U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq8U x y)
+       // cond:
+       // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLess16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less16 x y)
+       // cond:
+       // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessThan)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLess16U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less16U x y)
+       // cond:
+       // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLess32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less32 x y)
+       // cond:
+       // result: (LessThan (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessThan)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLess32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less32F x y)
+       // cond:
+       // result: (GreaterThan (FCMPS y x))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterThan)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLess32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less32U x y)
+       // cond:
+       // result: (LessThanU (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLess64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less64 x y)
+       // cond:
+       // result: (LessThan (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessThan)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLess64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less64F x y)
+       // cond:
+       // result: (GreaterThan (FCMPD y x))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterThan)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLess64U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less64U x y)
+       // cond:
+       // result: (LessThanU (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLess8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less8 x y)
+       // cond:
+       // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessThan)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLess8U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less8U x y)
+       // cond:
+       // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLoad(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Load <t> ptr mem)
+       // cond: t.IsBoolean()
+       // result: (MOVBUload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(t.IsBoolean()) {
+                       break
+               }
+               v.reset(OpARM64MOVBUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is8BitInt(t) && isSigned(t))
+       // result: (MOVBload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is8BitInt(t) && isSigned(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVBload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is8BitInt(t) && !isSigned(t))
+       // result: (MOVBUload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is8BitInt(t) && !isSigned(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVBUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is16BitInt(t) && isSigned(t))
+       // result: (MOVHload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is16BitInt(t) && isSigned(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVHload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is16BitInt(t) && !isSigned(t))
+       // result: (MOVHUload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is16BitInt(t) && !isSigned(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVHUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is32BitInt(t) && isSigned(t))
+       // result: (MOVWload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitInt(t) && isSigned(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVWload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is32BitInt(t) && !isSigned(t))
+       // result: (MOVWUload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitInt(t) && !isSigned(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVWUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is64BitInt(t) || isPtr(t))
+       // result: (MOVDload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is64BitInt(t) || isPtr(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVDload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: is32BitFloat(t)
+       // result: (FMOVSload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitFloat(t)) {
+                       break
+               }
+               v.reset(OpARM64FMOVSload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: is64BitFloat(t)
+       // result: (FMOVDload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is64BitFloat(t)) {
+                       break
+               }
+               v.reset(OpARM64FMOVDload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
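// Load dispatches purely on the value's type: booleans and unsigned
// integers take the zero-extending loads, signed integers the
// sign-extending ones, 64-bit integers and pointers a full MOVDload, and
// floats the FMOVSload/FMOVDload pair. Roughly, in plain Go terms
// (illustration only):
//
//	var b byte    // -> MOVBUload (zero-extend)
//	var i int8    // -> MOVBload  (sign-extend)
//	var p *int    // -> MOVDload
//	var f float32 // -> FMOVSload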
+func rewriteValueARM64_OpLrot16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot16 <t> x [c])
+       // cond:
+       // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> (ZeroExt16to64 x) [16-c&15]))
+       for {
+               t := v.Type
+               c := v.AuxInt
+               x := v.Args[0]
+               v.reset(OpARM64OR)
+               v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
+               v0.AuxInt = c & 15
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+               v1.AuxInt = 16 - c&15
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpLrot32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot32 x [c])
+       // cond:
+       // result: (RORWconst x [32-c&31])
+       for {
+               c := v.AuxInt
+               x := v.Args[0]
+               v.reset(OpARM64RORWconst)
+               v.AuxInt = 32 - c&31
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpLrot64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot64 x [c])
+       // cond:
+       // result: (RORconst  x [64-c&63])
+       for {
+               c := v.AuxInt
+               x := v.Args[0]
+               v.reset(OpARM64RORconst)
+               v.AuxInt = 64 - c&63
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpLrot8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot8  <t> x [c])
+       // cond:
+       // result: (OR (SLLconst <t> x [c&7])  (SRLconst <t> (ZeroExt8to64  x) [8-c&7]))
+       for {
+               t := v.Type
+               c := v.AuxInt
+               x := v.Args[0]
+               v.reset(OpARM64OR)
+               v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
+               v0.AuxInt = c & 7
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+               v1.AuxInt = 8 - c&7
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v.AddArg(v1)
+               return true
+       }
+}
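// ARM64 has native rotates only at 32 and 64 bits, so Lrot32/Lrot64 become
// RORWconst/RORconst (a rotate left by c is a rotate right by width-c),
// while Lrot16/Lrot8 are synthesized from a shift pair over the
// zero-extended value. Sketch in plain Go (illustration only):
//
//	func lrot8(x uint8, c uint) uint8 {
//		c &= 7
//		return x<<c | x>>(8-c) // x>>8 is 0 in Go, so c == 0 is safe
//	}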
+func rewriteValueARM64_OpLsh16x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x16 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
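// Go requires x << s to be 0 once s reaches the operand's width, but the
// machine SLL only looks at the low six bits of the count. The variable
// shift rules therefore guard with a conditional select (CSELULT): take
// the SLL result while the zero-extended count is unsigned-less-than 64,
// otherwise the constant 0. Guarding at 64 suffices for the narrower
// widths too, since a 16-bit left shift by 16..63 already leaves zeros in
// the bits that survive truncation. In plain Go (illustration only):
//
//	if s >= 64 {
//		r = 0
//	} else {
//		r = x << s // single SLL
//	}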
+func rewriteValueARM64_OpLsh16x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x32 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh16x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x64  x (MOVDconst [c]))
+       // cond: uint64(c) < 16
+       // result: (SLLconst x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 16) {
+                       break
+               }
+               v.reset(OpARM64SLLconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (Lsh16x64  _ (MOVDconst [c]))
+       // cond: uint64(c) >= 16
+       // result: (MOVDconst [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 16) {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Lsh16x64 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpConst64, t)
+               v1.AuxInt = 0
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v.AddArg(v2)
+               return true
+       }
+}
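// With a constant count the guard is unnecessary: a count below the width
// folds to a single SLLconst, and a count at or above it folds the whole
// expression to the constant 0 (any uint16 shifted left by 16 or more
// truncates to 0). The Lsh32x64 and Lsh64x64 rules below do the same with
// bounds 32 and 64.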
+func rewriteValueARM64_OpLsh16x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x8  <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh32x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x16 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh32x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x32 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh32x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x64  x (MOVDconst [c]))
+       // cond: uint64(c) < 32
+       // result: (SLLconst x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 32) {
+                       break
+               }
+               v.reset(OpARM64SLLconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (Lsh32x64  _ (MOVDconst [c]))
+       // cond: uint64(c) >= 32
+       // result: (MOVDconst [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 32) {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Lsh32x64 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpConst64, t)
+               v1.AuxInt = 0
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v.AddArg(v2)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh32x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x8  <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh64x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh64x16 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh64x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh64x32 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh64x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh64x64  x (MOVDconst [c]))
+       // cond: uint64(c) < 64
+       // result: (SLLconst x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 64) {
+                       break
+               }
+               v.reset(OpARM64SLLconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (Lsh64x64  _ (MOVDconst [c]))
+       // cond: uint64(c) >= 64
+       // result: (MOVDconst [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 64) {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Lsh64x64 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpConst64, t)
+               v1.AuxInt = 0
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v.AddArg(v2)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh64x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh64x8  <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh8x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x16 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh8x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x32 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh8x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x64   x (MOVDconst [c]))
+       // cond: uint64(c) < 8
+       // result: (SLLconst x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 8) {
+                       break
+               }
+               v.reset(OpARM64SLLconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (Lsh8x64   _ (MOVDconst [c]))
+       // cond: uint64(c) >= 8
+       // result: (MOVDconst [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 8) {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Lsh8x64 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpConst64, t)
+               v1.AuxInt = 0
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v.AddArg(v2)
+               return true
+       }
+}
+func rewriteValueARM64_OpLsh8x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x8  <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpMod16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16 x y)
+       // cond:
+       // result: (MODW (SignExt16to32 x) (SignExt16to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64MODW)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpMod16u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16u x y)
+       // cond:
+       // result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64UMODW)
+               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpMod32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32 x y)
+       // cond:
+       // result: (MODW x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64MODW)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpMod32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32u x y)
+       // cond:
+       // result: (UMODW x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64UMODW)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpMod64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod64 x y)
+       // cond:
+       // result: (MOD x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64MOD)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpMod64u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod64u x y)
+       // cond:
+       // result: (UMOD x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64UMOD)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpMod8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8 x y)
+       // cond:
+       // result: (MODW (SignExt8to32 x) (SignExt8to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64MODW)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpMod8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8u x y)
+       // cond:
+       // result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64UMODW)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
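// ARM64 has divide instructions but no remainder, so the MOD/UMOD (and
// MODW/UMODW) ops chosen above are expected to expand later into a divide
// plus multiply-subtract; 8- and 16-bit operands are widened to 32 bits
// first. The identity, in plain Go (illustration only, not part of this
// CL):
//
//	func mod64(x, y int64) int64 {
//		q := x / y     // SDIV
//		return x - q*y // MSUB
//	}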
+func rewriteValueARM64_OpMove(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Move [s] _ _ mem)
+       // cond: SizeAndAlign(s).Size() == 0
+       // result: mem
+       for {
+               s := v.AuxInt
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 0) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = mem.Type
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 1
+       // result: (MOVBstore dst (MOVBUload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 1) {
+                       break
+               }
+               v.reset(OpARM64MOVBstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore dst (MOVHUload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVHstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2
+       // result: (MOVBstore [1] dst (MOVBUload [1] src mem)           (MOVBstore dst (MOVBUload src mem) mem))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 2) {
+                       break
+               }
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = 1
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+               v0.AuxInt = 1
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
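// Each multi-instruction Move expansion threads the memory state
// explicitly: every store takes the previous store as its final argument,
// so the loads and stores stay ordered. Conceptually, for the 2-byte
// unaligned case just above (illustration only):
//
//	b0, b1 := src[0], src[1] // two MOVBUload against the same mem
//	dst[0] = b0              // inner MOVBstore -> new mem
//	dst[1] = b1              // outer MOVBstore chained on that mem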
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore dst (MOVWUload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVWstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [2] dst (MOVHUload [2] src mem)           (MOVHstore dst (MOVHUload src mem) mem))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = 2
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+               v0.AuxInt = 2
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4
+       // result: (MOVBstore [3] dst (MOVBUload [3] src mem)           (MOVBstore [2] dst (MOVBUload [2] src mem)                      (MOVBstore [1] dst (MOVBUload [1] src mem)                              (MOVBstore dst (MOVBUload src mem) mem))))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4) {
+                       break
+               }
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = 3
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+               v0.AuxInt = 3
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+               v1.AuxInt = 2
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+               v2.AuxInt = 2
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+               v3.AuxInt = 1
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+               v4.AuxInt = 1
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+               v5.AddArg(dst)
+               v6 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+               v6.AddArg(src)
+               v6.AddArg(mem)
+               v5.AddArg(v6)
+               v5.AddArg(mem)
+               v3.AddArg(v5)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
+       // result: (MOVDstore dst (MOVDload src mem) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVDstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore [4] dst (MOVWUload [4] src mem)           (MOVWstore dst (MOVWUload src mem) mem))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVWstore)
+               v.AuxInt = 4
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
+               v0.AuxInt = 4
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [6] dst (MOVHUload [6] src mem)           (MOVHstore [4] dst (MOVHUload [4] src mem)                      (MOVHstore [2] dst (MOVHUload [2] src mem)                              (MOVHstore dst (MOVHUload src mem) mem))))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = 6
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+               v0.AuxInt = 6
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+               v1.AuxInt = 4
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+               v2.AuxInt = 4
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+               v3.AuxInt = 2
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+               v4.AuxInt = 2
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+               v5.AddArg(dst)
+               v6 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+               v6.AddArg(src)
+               v6.AddArg(mem)
+               v5.AddArg(v6)
+               v5.AddArg(mem)
+               v3.AddArg(v5)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 3
+       // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 3) {
+                       break
+               }
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = 2
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+               v0.AuxInt = 2
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+               v1.AuxInt = 1
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+               v2.AuxInt = 1
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = 4
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+               v0.AuxInt = 4
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+               v1.AuxInt = 2
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+               v2.AuxInt = 2
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore [8] dst (MOVWUload [8] src mem) (MOVWstore [4] dst (MOVWUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVWstore)
+               v.AuxInt = 8
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
+               v0.AuxInt = 8
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
+               v1.AuxInt = 4
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
+               v2.AuxInt = 4
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
+       // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVDstore)
+               v.AuxInt = 8
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
+               v0.AuxInt = 8
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
+       // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVDstore)
+               v.AuxInt = 16
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
+               v0.AuxInt = 16
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
+               v1.AuxInt = 8
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
+               v2.AuxInt = 8
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() > 24 || SizeAndAlign(s).Align()%8 != 0
+       // result: (LoweredMove [SizeAndAlign(s).Align()] dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
+       for {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() > 24 || SizeAndAlign(s).Align()%8 != 0) {
+                       break
+               }
+               v.reset(OpARM64LoweredMove)
+               v.AuxInt = SizeAndAlign(s).Align()
+               v.AddArg(dst)
+               v.AddArg(src)
+               v0 := b.NewValue0(v.Line, OpARM64ADDconst, src.Type)
+               v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+               v0.AddArg(src)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
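+// Note on the Move lowerings above: copies of at most 24 bytes with suitable
+// alignment are unrolled into straight-line load/store pairs of the widest
+// width the alignment allows (e.g. a 16-byte, 8-aligned copy becomes two
+// MOVDload/MOVDstore pairs). Anything larger than 24 bytes, or not 8-aligned,
+// falls back to LoweredMove; its ADDconst operand is presumably src plus size
+// minus the per-iteration width chosen by moveSize, i.e. the address of the
+// last chunk, which the copy loop can use as its termination bound.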
+func rewriteValueARM64_OpMul16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul16 x y)
+       // cond:
+       // result: (MULW x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64MULW)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpMul32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32 x y)
+       // cond:
+       // result: (MULW x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64MULW)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpMul32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32F x y)
+       // cond:
+       // result: (FMULS x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64FMULS)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpMul64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul64 x y)
+       // cond:
+       // result: (MUL x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64MUL)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpMul64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul64F x y)
+       // cond:
+       // result: (FMULD x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64FMULD)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpMul8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul8 x y)
+       // cond:
+       // result: (MULW x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64MULW)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
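+// Note: Mul8/Mul16/Mul32 all lower to the 32-bit MULW, since only the low
+// 8/16/32 bits of the product are observable; Mul64 uses the full-width MUL,
+// and the floating-point multiplies map directly to FMULS/FMULD.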
+func rewriteValueARM64_OpNeg16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg16 x)
+       // cond:
+       // result: (NEG x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64NEG)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpNeg32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32 x)
+       // cond:
+       // result: (NEG x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64NEG)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpNeg32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32F x)
+       // cond:
+       // result: (FNEGS x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64FNEGS)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpNeg64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg64 x)
+       // cond:
+       // result: (NEG x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64NEG)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpNeg64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg64F x)
+       // cond:
+       // result: (FNEGD x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64FNEGD)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpNeg8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg8 x)
+       // cond:
+       // result: (NEG x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64NEG)
+               v.AddArg(x)
+               return true
+       }
+}
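+// Note: integer negation of every width lowers to the 64-bit NEG; the high
+// bits of a narrow value are dead, so no truncation is needed here. Float
+// negation maps to FNEGS/FNEGD.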
+func rewriteValueARM64_OpNeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq16 x y)
+       // cond:
+       // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64NotEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpNeq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq32 x y)
+       // cond:
+       // result: (NotEqual (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64NotEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpNeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq32F x y)
+       // cond:
+       // result: (NotEqual (FCMPS x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64NotEqual)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpNeq64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq64 x y)
+       // cond:
+       // result: (NotEqual (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64NotEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpNeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq64F x y)
+       // cond:
+       // result: (NotEqual (FCMPD x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64NotEqual)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpNeq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq8 x y)
+       // cond:
+       // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64NotEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpNeqB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NeqB x y)
+       // cond:
+       // result: (XOR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64XOR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpNeqPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NeqPtr x y)
+       // cond:
+       // result: (NotEqual (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64NotEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
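+// Note: narrow integer inequalities zero-extend both operands and compare
+// with the 32-bit CMPW, while 64-bit and pointer inequalities use the full
+// CMP; NotEqual then materializes the flags result as a bool. NeqB is simply
+// XOR: booleans are 0 or 1, and they differ exactly when their XOR is 1.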
+func rewriteValueARM64_OpNilCheck(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NilCheck ptr mem)
+       // cond:
+       // result: (LoweredNilCheck ptr mem)
+       for {
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64LoweredNilCheck)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueARM64_OpNot(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Not x)
+       // cond:
+       // result: (XOR (MOVDconst [1]) x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64XOR)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 1
+               v.AddArg(v0)
+               v.AddArg(x)
+               return true
+       }
+}
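+// Note: with booleans represented as 0 or 1, Not is just an XOR with the
+// constant 1.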
+func rewriteValueARM64_OpOffPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OffPtr [off] ptr:(SP))
+       // cond:
+       // result: (MOVDaddr [off] ptr)
+       for {
+               off := v.AuxInt
+               ptr := v.Args[0]
+               if ptr.Op != OpSP {
+                       break
+               }
+               v.reset(OpARM64MOVDaddr)
+               v.AuxInt = off
+               v.AddArg(ptr)
+               return true
+       }
+       // match: (OffPtr [off] ptr)
+       // cond:
+       // result: (ADDconst [off] ptr)
+       for {
+               off := v.AuxInt
+               ptr := v.Args[0]
+               v.reset(OpARM64ADDconst)
+               v.AuxInt = off
+               v.AddArg(ptr)
+               return true
+       }
+}
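+// Note: an OffPtr whose base is SP folds into a single MOVDaddr address
+// computation; any other base pointer gets a plain ADDconst.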
+func rewriteValueARM64_OpOr16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or16 x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64OR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpOr32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or32 x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64OR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpOr64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or64 x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64OR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpOr8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or8 x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64OR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpOrB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OrB x y)
+       // cond:
+       // result: (OR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64OR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
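+// Note: Or of every integer width, and OrB, lower to the single 64-bit OR;
+// as with NEG above, the unused high bits are harmless.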
+func rewriteValueARM64_OpRsh16Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux16 <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpConst64, t)
+               v3.AuxInt = 0
+               v.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
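+// Note on the unsigned-shift lowerings: ARM64 variable shifts use the shift
+// amount modulo 64, but Go requires an unsigned shift by an amount >= the
+// operand width to produce 0. The pattern above therefore shifts the
+// zero-extended operand and uses CSELULT (conditional select if
+// unsigned-less-than) against CMPconst [64] to substitute the constant 0
+// whenever the extended shift amount is 64 or more. Guarding at 64 rather
+// than at the operand width suffices because the operand is zero-extended:
+// shifting it right by 16..63 already yields 0.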
+func rewriteValueARM64_OpRsh16Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux32 <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpConst64, t)
+               v3.AuxInt = 0
+               v.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh16Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux64 x (MOVDconst [c]))
+       // cond: uint64(c) < 16
+       // result: (SRLconst (ZeroExt16to64 x) [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 16) {
+                       break
+               }
+               v.reset(OpARM64SRLconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (Rsh16Ux64 _ (MOVDconst [c]))
+       // cond: uint64(c) >= 16
+       // result: (MOVDconst [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 16) {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Rsh16Ux64 <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v3.AddArg(y)
+               v.AddArg(v3)
+               return true
+       }
+}
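+// Note: when the shift amount is a constant, the guard is resolved at
+// compile time instead: amounts below the operand width fold to a single
+// SRLconst, and amounts at or above it fold to MOVDconst [0].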
+func rewriteValueARM64_OpRsh16Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux8  <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpConst64, t)
+               v3.AuxInt = 0
+               v.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh16x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x16 x y)
+       // cond:
+       // result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v3.AuxInt = 63
+               v1.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v1.AddArg(v4)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh16x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x32 x y)
+       // cond:
+       // result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v3.AuxInt = 63
+               v1.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v1.AddArg(v4)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh16x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x64  x (MOVDconst [c]))
+       // cond: uint64(c) < 16
+       // result: (SRAconst (SignExt16to64 x) [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 16) {
+                       break
+               }
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (Rsh16x64 x (MOVDconst [c]))
+       // cond: uint64(c) >= 16
+       // result: (SRAconst (SignExt16to64 x) [63])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 16) {
+                       break
+               }
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = 63
+               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (Rsh16x64 x y)
+       // cond:
+       // result: (SRA (SignExt16to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v1.AddArg(y)
+               v2 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v2.AuxInt = 63
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+}
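+// Note on the signed-shift lowerings: Go defines a signed right shift by an
+// amount >= the operand width as filling with the sign bit. Rather than
+// selecting a constant result, these rules clamp the shift amount itself:
+// CSELULT picks y when y < 64 and 63 otherwise, so SRA of the sign-extended
+// operand by 63 reproduces the sign fill. The constant cases fold to
+// SRAconst [c] or SRAconst [63] accordingly.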
+func rewriteValueARM64_OpRsh16x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x8  x y)
+       // cond:
+       // result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v3.AuxInt = 63
+               v1.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v1.AddArg(v4)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh32Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux16 <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpConst64, t)
+               v3.AuxInt = 0
+               v.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh32Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux32 <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpConst64, t)
+               v3.AuxInt = 0
+               v.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh32Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux64 x (MOVDconst [c]))
+       // cond: uint64(c) < 32
+       // result: (SRLconst (ZeroExt32to64 x) [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 32) {
+                       break
+               }
+               v.reset(OpARM64SRLconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (Rsh32Ux64 _ (MOVDconst [c]))
+       // cond: uint64(c) >= 32
+       // result: (MOVDconst [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 32) {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Rsh32Ux64 <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v3.AddArg(y)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh32Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux8  <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpConst64, t)
+               v3.AuxInt = 0
+               v.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh32x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32x16 x y)
+       // cond:
+       // result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v3.AuxInt = 63
+               v1.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v1.AddArg(v4)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh32x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32x32 x y)
+       // cond:
+       // result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v3.AuxInt = 63
+               v1.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v1.AddArg(v4)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh32x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32x64  x (MOVDconst [c]))
+       // cond: uint64(c) < 32
+       // result: (SRAconst (SignExt32to64 x) [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 32) {
+                       break
+               }
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (Rsh32x64 x (MOVDconst [c]))
+       // cond: uint64(c) >= 32
+       // result: (SRAconst (SignExt32to64 x) [63])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 32) {
+                       break
+               }
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = 63
+               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (Rsh32x64 x y)
+       // cond:
+       // result: (SRA (SignExt32to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v1.AddArg(y)
+               v2 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v2.AuxInt = 63
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh32x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32x8  x y)
+       // cond:
+       // result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v3.AuxInt = 63
+               v1.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v1.AddArg(v4)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh64Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64Ux16 <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh64Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64Ux32 <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh64Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64Ux64 x (MOVDconst [c]))
+       // cond: uint64(c) < 64
+       // result: (SRLconst x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 64) {
+                       break
+               }
+               v.reset(OpARM64SRLconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (Rsh64Ux64 _ (MOVDconst [c]))
+       // cond: uint64(c) >= 64
+       // result: (MOVDconst [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 64) {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Rsh64Ux64 <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpConst64, t)
+               v1.AuxInt = 0
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v.AddArg(v2)
+               return true
+       }
+}
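+// Note: for 64-bit operands only the shift amount needs zero-extension and
+// guarding; the value itself is already full width, so the SRL/SRA operand
+// is used as-is.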
+func rewriteValueARM64_OpRsh64Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64Ux8  <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh64x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64x16 x y)
+       // cond:
+       // result: (SRA x (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v2.AuxInt = 63
+               v0.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v0.AddArg(v3)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh64x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64x32 x y)
+       // cond:
+       // result: (SRA x (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v2.AuxInt = 63
+               v0.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v0.AddArg(v3)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh64x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64x64  x (MOVDconst [c]))
+       // cond: uint64(c) < 64
+       // result: (SRAconst x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 64) {
+                       break
+               }
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (Rsh64x64 x (MOVDconst [c]))
+       // cond: uint64(c) >= 64
+       // result: (SRAconst x [63])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 64) {
+                       break
+               }
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = 63
+               v.AddArg(x)
+               return true
+       }
+       // match: (Rsh64x64 x y)
+       // cond:
+       // result: (SRA x (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v1.AuxInt = 63
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh64x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64x8  x y)
+       // cond:
+       // result: (SRA x (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v2.AuxInt = 63
+               v0.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v0.AddArg(v3)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh8Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux16 <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpConst64, t)
+               v3.AuxInt = 0
+               v.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh8Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux32 <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpConst64, t)
+               v3.AuxInt = 0
+               v.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
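+// With a constant count, Rsh8Ux64 folds completely: counts below 8
+// become a single SRLconst of the zero-extended value, and counts of
+// 8 or more are statically known to produce 0.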
+func rewriteValueARM64_OpRsh8Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux64  x (MOVDconst [c]))
+       // cond: uint64(c) < 8
+       // result: (SRLconst (ZeroExt8to64  x) [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 8) {
+                       break
+               }
+               v.reset(OpARM64SRLconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (Rsh8Ux64  _ (MOVDconst [c]))
+       // cond: uint64(c) >= 8
+       // result: (MOVDconst [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 8) {
+                       break
+               }
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Rsh8Ux64 <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v3.AddArg(y)
+               v.AddArg(v3)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh8Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux8  <t> x y)
+       // cond:
+       // result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpConst64, t)
+               v3.AuxInt = 0
+               v.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v.AddArg(v4)
+               return true
+       }
+}
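+// The signed narrow shifts sign-extend the value to 64 bits and clamp
+// the zero-extended count to 63, so an over-long shift yields pure
+// copies of the sign bit, as Go's shift semantics require.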
+func rewriteValueARM64_OpRsh8x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x16 x y)
+       // cond:
+       // result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v3.AuxInt = 63
+               v1.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v1.AddArg(v4)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh8x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x32 x y)
+       // cond:
+       // result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v3.AuxInt = 63
+               v1.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v1.AddArg(v4)
+               v.AddArg(v1)
+               return true
+       }
+}
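+// As in the unsigned case, constant counts fold away: below 8 a
+// single SRAconst of the sign-extended value suffices, and 8 or more
+// becomes SRAconst [63], i.e. all sign bits.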
+func rewriteValueARM64_OpRsh8x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x64   x (MOVDconst [c]))
+       // cond: uint64(c) < 8
+       // result: (SRAconst (SignExt8to64  x) [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 8) {
+                       break
+               }
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (Rsh8x64  x (MOVDconst [c]))
+       // cond: uint64(c) >= 8
+       // result: (SRAconst (SignExt8to64  x) [63])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 8) {
+                       break
+               }
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = 63
+               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (Rsh8x64 x y)
+       // cond:
+       // result: (SRA (SignExt8to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v1.AddArg(y)
+               v2 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v2.AuxInt = 63
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v3.AddArg(y)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpRsh8x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x8  x y)
+       // cond:
+       // result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SRA)
+               v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+               v3.AuxInt = 63
+               v1.AddArg(v3)
+               v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v4.AuxInt = 64
+               v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v5.AddArg(y)
+               v4.AddArg(v5)
+               v1.AddArg(v4)
+               v.AddArg(v1)
+               return true
+       }
+}
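+// Sign extensions lower to single register moves (MOVBreg, MOVHreg,
+// MOVWreg); the zero extensions further below use the unsigned
+// counterparts (MOVBUreg, MOVHUreg, MOVWUreg). The destination width
+// is irrelevant, since the full 64-bit register is written.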
+func rewriteValueARM64_OpSignExt16to32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SignExt16to32 x)
+       // cond:
+       // result: (MOVHreg x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64MOVHreg)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpSignExt16to64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SignExt16to64 x)
+       // cond:
+       // result: (MOVHreg x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64MOVHreg)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpSignExt32to64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SignExt32to64 x)
+       // cond:
+       // result: (MOVWreg x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64MOVWreg)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpSignExt8to16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SignExt8to16 x)
+       // cond:
+       // result: (MOVBreg x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64MOVBreg)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpSignExt8to32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SignExt8to32 x)
+       // cond:
+       // result: (MOVBreg x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64MOVBreg)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpSignExt8to64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SignExt8to64 x)
+       // cond:
+       // result: (MOVBreg x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64MOVBreg)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpSqrt(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Sqrt x)
+       // cond:
+       // result: (FSQRTD x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64FSQRTD)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpStaticCall(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (StaticCall [argwid] {target} mem)
+       // cond:
+       // result: (CALLstatic [argwid] {target} mem)
+       for {
+               argwid := v.AuxInt
+               target := v.Aux
+               mem := v.Args[0]
+               v.reset(OpARM64CALLstatic)
+               v.AuxInt = argwid
+               v.Aux = target
+               v.AddArg(mem)
+               return true
+       }
+}
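+// Store dispatches on the size recorded in AuxInt, with the
+// floating-point cases split out by type so that 4- and 8-byte stores
+// become FMOVSstore/FMOVDstore rather than MOVWstore/MOVDstore.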
+func rewriteValueARM64_OpStore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Store [1] ptr val mem)
+       // cond:
+       // result: (MOVBstore ptr val mem)
+       for {
+               if v.AuxInt != 1 {
+                       break
+               }
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVBstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Store [2] ptr val mem)
+       // cond:
+       // result: (MOVHstore ptr val mem)
+       for {
+               if v.AuxInt != 2 {
+                       break
+               }
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVHstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Store [4] ptr val mem)
+       // cond: !is32BitFloat(val.Type)
+       // result: (MOVWstore ptr val mem)
+       for {
+               if v.AuxInt != 4 {
+                       break
+               }
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(!is32BitFloat(val.Type)) {
+                       break
+               }
+               v.reset(OpARM64MOVWstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Store [8] ptr val mem)
+       // cond: !is64BitFloat(val.Type)
+       // result: (MOVDstore ptr val mem)
+       for {
+               if v.AuxInt != 8 {
+                       break
+               }
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(!is64BitFloat(val.Type)) {
+                       break
+               }
+               v.reset(OpARM64MOVDstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Store [4] ptr val mem)
+       // cond: is32BitFloat(val.Type)
+       // result: (FMOVSstore ptr val mem)
+       for {
+               if v.AuxInt != 4 {
+                       break
+               }
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32BitFloat(val.Type)) {
+                       break
+               }
+               v.reset(OpARM64FMOVSstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Store [8] ptr val mem)
+       // cond: is64BitFloat(val.Type)
+       // result: (FMOVDstore ptr val mem)
+       for {
+               if v.AuxInt != 8 {
+                       break
+               }
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is64BitFloat(val.Type)) {
+                       break
+               }
+               v.reset(OpARM64FMOVDstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
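+// All integer subtractions use the 64-bit SUB: for the narrower types
+// only the low-order bits of the result matter, and consumers
+// re-extend them as needed.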
+func rewriteValueARM64_OpSub16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Sub16 x y)
+       // cond:
+       // result: (SUB x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SUB)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpSub32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Sub32 x y)
+       // cond:
+       // result: (SUB x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SUB)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpSub32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Sub32F x y)
+       // cond:
+       // result: (FSUBS x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64FSUBS)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpSub64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Sub64 x y)
+       // cond:
+       // result: (SUB x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SUB)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpSub64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Sub64F x y)
+       // cond:
+       // result: (FSUBD x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64FSUBD)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpSub8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Sub8 x y)
+       // cond:
+       // result: (SUB x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SUB)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpSubPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SubPtr x y)
+       // cond:
+       // result: (SUB x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64SUB)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
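+// Truncations are free: the narrow result occupies the same register
+// as its argument, so each Trunc lowers to a plain Copy.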
+func rewriteValueARM64_OpTrunc16to8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Trunc16to8 x)
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpTrunc32to16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Trunc32to16 x)
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpTrunc32to8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Trunc32to8 x)
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpTrunc64to16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Trunc64to16 x)
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpTrunc64to32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Trunc64to32 x)
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpTrunc64to8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Trunc64to8 x)
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpXor16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Xor16 x y)
+       // cond:
+       // result: (XOR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64XOR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpXor32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Xor32 x y)
+       // cond:
+       // result: (XOR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64XOR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpXor64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Xor64 x y)
+       // cond:
+       // result: (XOR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64XOR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM64_OpXor8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Xor8 x y)
+       // cond:
+       // result: (XOR x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64XOR)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
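+// Zero is lowered by size and alignment: small fixed sizes become
+// straight-line sequences of zeroing stores, aligned multiples of 8
+// between 32 bytes and 1KB use Duff's device, and everything larger
+// or misaligned falls back to the LoweredZero loop.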
+func rewriteValueARM64_OpZero(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Zero [s] _ mem)
+       // cond: SizeAndAlign(s).Size() == 0
+       // result: mem
+       for {
+               s := v.AuxInt
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 0) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = mem.Type
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 1
+       // result: (MOVBstore ptr (MOVDconst [0]) mem)
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 1) {
+                       break
+               }
+               v.reset(OpARM64MOVBstore)
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore ptr (MOVDconst [0]) mem)
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVHstore)
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 2
+       // result: (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 2) {
+                       break
+               }
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = 1
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore ptr (MOVDconst [0]) mem)
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVWstore)
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = 2
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 4
+       // result: (MOVBstore [3] ptr (MOVDconst [0]) (MOVBstore [2] ptr (MOVDconst [0]) (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem))))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 4) {
+                       break
+               }
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = 3
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+               v1.AuxInt = 2
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+               v3.AuxInt = 1
+               v3.AddArg(ptr)
+               v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v4.AuxInt = 0
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+               v5.AddArg(ptr)
+               v6 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v6.AuxInt = 0
+               v5.AddArg(v6)
+               v5.AddArg(mem)
+               v3.AddArg(v5)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
+       // result: (MOVDstore ptr (MOVDconst [0]) mem)
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVDstore)
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVWstore)
+               v.AuxInt = 4
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = 6
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+               v1.AuxInt = 4
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+               v3.AuxInt = 2
+               v3.AddArg(ptr)
+               v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v4.AuxInt = 0
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+               v5.AddArg(ptr)
+               v6 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v6.AuxInt = 0
+               v5.AddArg(v6)
+               v5.AddArg(mem)
+               v3.AddArg(v5)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 3
+       // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem)))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 3) {
+                       break
+               }
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = 2
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+               v1.AuxInt = 1
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+               v3.AddArg(ptr)
+               v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v4.AuxInt = 0
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = 4
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+               v1.AuxInt = 2
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+               v3.AddArg(ptr)
+               v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v4.AuxInt = 0
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVWstore)
+               v.AuxInt = 8
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
+               v1.AuxInt = 4
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
+               v3.AddArg(ptr)
+               v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v4.AuxInt = 0
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
+       // result: (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVDstore)
+               v.AuxInt = 8
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
+       // result: (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
+                       break
+               }
+               v.reset(OpARM64MOVDstore)
+               v.AuxInt = 16
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 0
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
+               v1.AuxInt = 8
+               v1.AddArg(ptr)
+               v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v2.AuxInt = 0
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
+               v3.AddArg(ptr)
+               v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v4.AuxInt = 0
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
+               return true
+       }
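+       // The DUFFZERO AuxInt is an offset into the duffzero routine:
+       // each 8-byte store there is a single 4-byte instruction, so
+       // entering at 4*(128 - size/8) runs exactly size/8 stores.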
+       // match: (Zero [s] ptr mem)
+       // cond: SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128 && SizeAndAlign(s).Align()%8 == 0 && !config.noDuffDevice
+       // result: (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/8))] ptr mem)
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128 && SizeAndAlign(s).Align()%8 == 0 && !config.noDuffDevice) {
+                       break
+               }
+               v.reset(OpARM64DUFFZERO)
+               v.AuxInt = 4 * (128 - int64(SizeAndAlign(s).Size()/8))
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Zero [s] ptr mem)
+       // cond: (SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0
+       // result: (LoweredZero [SizeAndAlign(s).Align()] ptr (ADDconst <ptr.Type> [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)] ptr) mem)
+       for {
+               s := v.AuxInt
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !((SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0) {
+                       break
+               }
+               v.reset(OpARM64LoweredZero)
+               v.AuxInt = SizeAndAlign(s).Align()
+               v.AddArg(ptr)
+               v0 := b.NewValue0(v.Line, OpARM64ADDconst, ptr.Type)
+               v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+               v0.AddArg(ptr)
+               v.AddArg(v0)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValueARM64_OpZeroExt16to32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ZeroExt16to32 x)
+       // cond:
+       // result: (MOVHUreg x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64MOVHUreg)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpZeroExt16to64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ZeroExt16to64 x)
+       // cond:
+       // result: (MOVHUreg x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64MOVHUreg)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpZeroExt32to64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ZeroExt32to64 x)
+       // cond:
+       // result: (MOVWUreg x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64MOVWUreg)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpZeroExt8to16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ZeroExt8to16 x)
+       // cond:
+       // result: (MOVBUreg x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64MOVBUreg)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpZeroExt8to32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ZeroExt8to32 x)
+       // cond:
+       // result: (MOVBUreg x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64MOVBUreg)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpZeroExt8to64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ZeroExt8to64 x)
+       // cond:
+       // result: (MOVBUreg x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARM64MOVBUreg)
+               v.AddArg(x)
+               return true
+       }
+}
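+// The block rewrites fold statically known flag values into
+// unconditional First blocks, swapping the successors when the
+// condition is known false, and re-target branches on InvertFlags to
+// the kind that reads the swapped operands (EQ stays EQ, GE becomes
+// LE, GT becomes LT, and so on).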
+func rewriteBlockARM64(b *Block) bool {
+       switch b.Kind {
+       case BlockARM64EQ:
+               // match: (EQ (FlagEQ) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagEQ {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (EQ (FlagLT_ULT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_ULT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (EQ (FlagLT_UGT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_UGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (EQ (FlagGT_ULT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagGT_ULT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (EQ (FlagGT_UGT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagGT_UGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (EQ (InvertFlags cmp) yes no)
+               // cond:
+               // result: (EQ cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64InvertFlags {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64EQ
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+       case BlockARM64GE:
+               // match: (GE (FlagEQ) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagEQ {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (GE (FlagLT_ULT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_ULT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (GE (FlagLT_UGT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_UGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (GE (FlagGT_ULT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagGT_ULT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (GE (FlagGT_UGT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagGT_UGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (GE (InvertFlags cmp) yes no)
+               // cond:
+               // result: (LE cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64InvertFlags {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64LE
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+       case BlockARM64GT:
+               // match: (GT (FlagEQ) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagEQ {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (GT (FlagLT_ULT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_ULT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (GT (FlagLT_UGT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_UGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (GT (FlagGT_ULT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagGT_ULT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (GT (FlagGT_UGT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagGT_UGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (GT (InvertFlags cmp) yes no)
+               // cond:
+               // result: (LT cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64InvertFlags {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64LT
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
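+       // A generic If whose control is one of the comparison
+       // pseudo-ops branches directly on the underlying flags,
+       // choosing the matching signed or unsigned block kind.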
+       case BlockIf:
+               // match: (If (Equal cc) yes no)
+               // cond:
+               // result: (EQ cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64Equal {
+                               break
+                       }
+                       cc := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64EQ
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (If (NotEqual cc) yes no)
+               // cond:
+               // result: (NE cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64NotEqual {
+                               break
+                       }
+                       cc := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64NE
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (If (LessThan cc) yes no)
+               // cond:
+               // result: (LT cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64LessThan {
+                               break
+                       }
+                       cc := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64LT
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (If (LessThanU cc) yes no)
+               // cond:
+               // result: (ULT cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64LessThanU {
+                               break
+                       }
+                       cc := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64ULT
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (If (LessEqual cc) yes no)
+               // cond:
+               // result: (LE cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64LessEqual {
+                               break
+                       }
+                       cc := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64LE
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (If (LessEqualU cc) yes no)
+               // cond:
+               // result: (ULE cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64LessEqualU {
+                               break
+                       }
+                       cc := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64ULE
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (If (GreaterThan cc) yes no)
+               // cond:
+               // result: (GT cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64GreaterThan {
+                               break
+                       }
+                       cc := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64GT
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (If (GreaterThanU cc) yes no)
+               // cond:
+               // result: (UGT cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64GreaterThanU {
+                               break
+                       }
+                       cc := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64UGT
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (If (GreaterEqual cc) yes no)
+               // cond:
+               // result: (GE cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64GreaterEqual {
+                               break
+                       }
+                       cc := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64GE
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (If (GreaterEqualU cc) yes no)
+               // cond:
+               // result: (UGE cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64GreaterEqualU {
+                               break
+                       }
+                       cc := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64UGE
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
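+               // Fallback: any other control value is a plain boolean, so
+               // branch on the result of comparing it against zero.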
+               // match: (If cond yes no)
+               // cond:
+               // result: (NE (CMPconst [0] cond) yes no)
+               for {
+                       v := b.Control
+                       cond := b.Control
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64NE
+                       v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+                       v0.AuxInt = 0
+                       v0.AddArg(cond)
+                       b.SetControl(v0)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+       case BlockARM64LE:
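+               // With constant flags (FlagEQ, FlagLT_ULT, ...) the branch is
+               // decided statically: rewrite to a First block, swapping
+               // successors when the condition is statically false.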
+               // match: (LE (FlagEQ) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagEQ {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (LE (FlagLT_ULT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_ULT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (LE (FlagLT_UGT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_UGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (LE (FlagGT_ULT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagGT_ULT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (LE (FlagGT_UGT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagGT_UGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (LE (InvertFlags cmp) yes no)
+               // cond:
+               // result: (GE cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64InvertFlags {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64GE
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+       case BlockARM64LT:
+               // match: (LT (FlagEQ) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagEQ {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (LT (FlagLT_ULT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_ULT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (LT (FlagLT_UGT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_UGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (LT (FlagGT_ULT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagGT_ULT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (LT (FlagGT_UGT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagGT_UGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (LT (InvertFlags cmp) yes no)
+               // cond:
+               // result: (GT cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64InvertFlags {
+                               break
+                       }
+                       cmp := v.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64GT
+                       b.SetControl(cmp)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+       case BlockARM64NE:
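+               // (NE (CMPconst [0] (pseudo-op cc))) arises from the generic If
+               // lowering above; when the boolean came from a comparison,
+               // collapse it back into a direct branch on cc's flags.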
+               // match: (NE (CMPconst [0] (Equal cc)) yes no)
+               // cond:
+               // result: (EQ cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64CMPconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       v_0 := v.Args[0]
+                       if v_0.Op != OpARM64Equal {
+                               break
+                       }
+                       cc := v_0.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64EQ
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (CMPconst [0] (NotEqual cc)) yes no)
+               // cond:
+               // result: (NE cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64CMPconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       v_0 := v.Args[0]
+                       if v_0.Op != OpARM64NotEqual {
+                               break
+                       }
+                       cc := v_0.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64NE
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (CMPconst [0] (LessThan cc)) yes no)
+               // cond:
+               // result: (LT cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64CMPconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       v_0 := v.Args[0]
+                       if v_0.Op != OpARM64LessThan {
+                               break
+                       }
+                       cc := v_0.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64LT
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (CMPconst [0] (LessThanU cc)) yes no)
+               // cond:
+               // result: (ULT cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64CMPconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       v_0 := v.Args[0]
+                       if v_0.Op != OpARM64LessThanU {
+                               break
+                       }
+                       cc := v_0.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64ULT
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (CMPconst [0] (LessEqual cc)) yes no)
+               // cond:
+               // result: (LE cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64CMPconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       v_0 := v.Args[0]
+                       if v_0.Op != OpARM64LessEqual {
+                               break
+                       }
+                       cc := v_0.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64LE
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
+               // cond:
+               // result: (ULE cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64CMPconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       v_0 := v.Args[0]
+                       if v_0.Op != OpARM64LessEqualU {
+                               break
+                       }
+                       cc := v_0.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64ULE
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
+               // cond:
+               // result: (GT cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64CMPconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       v_0 := v.Args[0]
+                       if v_0.Op != OpARM64GreaterThan {
+                               break
+                       }
+                       cc := v_0.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64GT
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
+               // cond:
+               // result: (UGT cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64CMPconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       v_0 := v.Args[0]
+                       if v_0.Op != OpARM64GreaterThanU {
+                               break
+                       }
+                       cc := v_0.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64UGT
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
+               // cond:
+               // result: (GE cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64CMPconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       v_0 := v.Args[0]
+                       if v_0.Op != OpARM64GreaterEqual {
+                               break
+                       }
+                       cc := v_0.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64GE
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
+               // cond:
+               // result: (UGE cc yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64CMPconst {
+                               break
+                       }
+                       if v.AuxInt != 0 {
+                               break
+                       }
+                       v_0 := v.Args[0]
+                       if v_0.Op != OpARM64GreaterEqualU {
+                               break
+                       }
+                       cc := v_0.Args[0]
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockARM64UGE
+                       b.SetControl(cc)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (FlagEQ) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagEQ {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (NE (FlagLT_ULT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_ULT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (FlagLT_UGT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_UGT {
+                               break
+                       }
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (NE (FlagGT_ULT) yes no)
                // cond:
-               // result: (EQ cc yes no)
+               // result: (First nil yes no)
                for {
                        v := b.Control
-                       if v.Op != OpARM64Equal {
+                       if v.Op != OpARM64FlagGT_ULT {
                                break
                        }
-                       cc := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64EQ
-                       b.SetControl(cc)
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (If (NotEqual cc) yes no)
+               // match: (NE (FlagGT_UGT) yes no)
                // cond:
-               // result: (NE cc yes no)
+               // result: (First nil yes no)
                for {
                        v := b.Control
-                       if v.Op != OpARM64NotEqual {
+                       if v.Op != OpARM64FlagGT_UGT {
                                break
                        }
-                       cc := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64NE
-                       b.SetControl(cc)
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (If (LessThan cc) yes no)
+               // match: (NE (InvertFlags cmp) yes no)
                // cond:
-               // result: (LT cc yes no)
+               // result: (NE cmp yes no)
                for {
                        v := b.Control
-                       if v.Op != OpARM64LessThan {
+                       if v.Op != OpARM64InvertFlags {
                                break
                        }
-                       cc := v.Args[0]
+                       cmp := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64LT
-                       b.SetControl(cc)
+                       b.Kind = BlockARM64NE
+                       b.SetControl(cmp)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (If (LessThanU cc) yes no)
+       case BlockARM64UGE:
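+               // Unsigned blocks read the unsigned half of the constant flags:
+               // FlagLT_UGT means "signed less, unsigned greater", so UGE is
+               // statically true for it but statically false for FlagLT_ULT.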
+               // match: (UGE (FlagEQ) yes no)
                // cond:
-               // result: (ULT cc yes no)
+               // result: (First nil yes no)
                for {
                        v := b.Control
-                       if v.Op != OpARM64LessThanU {
+                       if v.Op != OpARM64FlagEQ {
                                break
                        }
-                       cc := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64ULT
-                       b.SetControl(cc)
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (If (LessEqual cc) yes no)
+               // match: (UGE (FlagLT_ULT) yes no)
                // cond:
-               // result: (LE cc yes no)
+               // result: (First nil no yes)
                for {
                        v := b.Control
-                       if v.Op != OpARM64LessEqual {
+                       if v.Op != OpARM64FlagLT_ULT {
                                break
                        }
-                       cc := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64LE
-                       b.SetControl(cc)
-                       _ = yes
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
                        _ = no
+                       _ = yes
                        return true
                }
-               // match: (If (LessEqualU cc) yes no)
+               // match: (UGE (FlagLT_UGT) yes no)
                // cond:
-               // result: (ULE cc yes no)
+               // result: (First nil yes no)
                for {
                        v := b.Control
-                       if v.Op != OpARM64LessEqualU {
+                       if v.Op != OpARM64FlagLT_UGT {
                                break
                        }
-                       cc := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64ULE
-                       b.SetControl(cc)
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (If (GreaterThan cc) yes no)
+               // match: (UGE (FlagGT_ULT) yes no)
                // cond:
-               // result: (GT cc yes no)
+               // result: (First nil no yes)
                for {
                        v := b.Control
-                       if v.Op != OpARM64GreaterThan {
+                       if v.Op != OpARM64FlagGT_ULT {
                                break
                        }
-                       cc := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64GT
-                       b.SetControl(cc)
-                       _ = yes
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
                        _ = no
+                       _ = yes
                        return true
                }
-               // match: (If (GreaterThanU cc) yes no)
+               // match: (UGE (FlagGT_UGT) yes no)
                // cond:
-               // result: (UGT cc yes no)
+               // result: (First nil yes no)
                for {
                        v := b.Control
-                       if v.Op != OpARM64GreaterThanU {
+                       if v.Op != OpARM64FlagGT_UGT {
                                break
                        }
-                       cc := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64UGT
-                       b.SetControl(cc)
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (If (GreaterEqual cc) yes no)
+               // match: (UGE (InvertFlags cmp) yes no)
                // cond:
-               // result: (GE cc yes no)
+               // result: (ULE cmp yes no)
                for {
                        v := b.Control
-                       if v.Op != OpARM64GreaterEqual {
+                       if v.Op != OpARM64InvertFlags {
                                break
                        }
-                       cc := v.Args[0]
+                       cmp := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64GE
-                       b.SetControl(cc)
+                       b.Kind = BlockARM64ULE
+                       b.SetControl(cmp)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (If (GreaterEqualU cc) yes no)
+       case BlockARM64UGT:
+               // match: (UGT (FlagEQ) yes no)
                // cond:
-               // result: (UGE cc yes no)
+               // result: (First nil no yes)
                for {
                        v := b.Control
-                       if v.Op != OpARM64GreaterEqualU {
+                       if v.Op != OpARM64FlagEQ {
                                break
                        }
-                       cc := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64UGE
-                       b.SetControl(cc)
-                       _ = yes
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
                        _ = no
+                       _ = yes
                        return true
                }
-               // match: (If cond yes no)
+               // match: (UGT (FlagLT_ULT) yes no)
                // cond:
-               // result: (NE (CMPconst [0] cond) yes no)
+               // result: (First nil no yes)
                for {
                        v := b.Control
-                       cond := b.Control
+                       if v.Op != OpARM64FlagLT_ULT {
+                               break
+                       }
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64NE
-                       v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-                       v0.AuxInt = 0
-                       v0.AddArg(cond)
-                       b.SetControl(v0)
-                       _ = yes
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
                        _ = no
+                       _ = yes
                        return true
                }
-       case BlockARM64NE:
-               // match: (NE (CMPconst [0] (Equal cc)) yes no)
+               // match: (UGT (FlagLT_UGT) yes no)
                // cond:
-               // result: (EQ cc yes no)
+               // result: (First nil yes no)
                for {
                        v := b.Control
-                       if v.Op != OpARM64CMPconst {
-                               break
-                       }
-                       if v.AuxInt != 0 {
-                               break
-                       }
-                       v_0 := v.Args[0]
-                       if v_0.Op != OpARM64Equal {
+                       if v.Op != OpARM64FlagLT_UGT {
                                break
                        }
-                       cc := v_0.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64EQ
-                       b.SetControl(cc)
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (NE (CMPconst [0] (NotEqual cc)) yes no)
+               // match: (UGT (FlagGT_ULT) yes no)
                // cond:
-               // result: (NE cc yes no)
+               // result: (First nil no yes)
                for {
                        v := b.Control
-                       if v.Op != OpARM64CMPconst {
-                               break
-                       }
-                       if v.AuxInt != 0 {
-                               break
-                       }
-                       v_0 := v.Args[0]
-                       if v_0.Op != OpARM64NotEqual {
+                       if v.Op != OpARM64FlagGT_ULT {
                                break
                        }
-                       cc := v_0.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64NE
-                       b.SetControl(cc)
-                       _ = yes
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
                        _ = no
+                       _ = yes
                        return true
                }
-               // match: (NE (CMPconst [0] (LessThan cc)) yes no)
+               // match: (UGT (FlagGT_UGT) yes no)
                // cond:
-               // result: (LT cc yes no)
+               // result: (First nil yes no)
                for {
                        v := b.Control
-                       if v.Op != OpARM64CMPconst {
+                       if v.Op != OpARM64FlagGT_UGT {
                                break
                        }
-                       if v.AuxInt != 0 {
-                               break
-                       }
-                       v_0 := v.Args[0]
-                       if v_0.Op != OpARM64LessThan {
-                               break
-                       }
-                       cc := v_0.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64LT
-                       b.SetControl(cc)
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (NE (CMPconst [0] (LessThanU cc)) yes no)
+               // match: (UGT (InvertFlags cmp) yes no)
                // cond:
-               // result: (ULT cc yes no)
+               // result: (ULT cmp yes no)
                for {
                        v := b.Control
-                       if v.Op != OpARM64CMPconst {
-                               break
-                       }
-                       if v.AuxInt != 0 {
+                       if v.Op != OpARM64InvertFlags {
                                break
                        }
-                       v_0 := v.Args[0]
-                       if v_0.Op != OpARM64LessThanU {
-                               break
-                       }
-                       cc := v_0.Args[0]
+                       cmp := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
                        b.Kind = BlockARM64ULT
-                       b.SetControl(cc)
+                       b.SetControl(cmp)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (NE (CMPconst [0] (LessEqual cc)) yes no)
+       case BlockARM64ULE:
+               // match: (ULE (FlagEQ) yes no)
                // cond:
-               // result: (LE cc yes no)
+               // result: (First nil yes no)
                for {
                        v := b.Control
-                       if v.Op != OpARM64CMPconst {
-                               break
-                       }
-                       if v.AuxInt != 0 {
-                               break
-                       }
-                       v_0 := v.Args[0]
-                       if v_0.Op != OpARM64LessEqual {
+                       if v.Op != OpARM64FlagEQ {
                                break
                        }
-                       cc := v_0.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64LE
-                       b.SetControl(cc)
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
+               // match: (ULE (FlagLT_ULT) yes no)
                // cond:
-               // result: (ULE cc yes no)
+               // result: (First nil yes no)
                for {
                        v := b.Control
-                       if v.Op != OpARM64CMPconst {
+                       if v.Op != OpARM64FlagLT_ULT {
                                break
                        }
-                       if v.AuxInt != 0 {
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       _ = yes
+                       _ = no
+                       return true
+               }
+               // match: (ULE (FlagLT_UGT) yes no)
+               // cond:
+               // result: (First nil no yes)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_UGT {
                                break
                        }
-                       v_0 := v.Args[0]
-                       if v_0.Op != OpARM64LessEqualU {
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (ULE (FlagGT_ULT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagGT_ULT {
                                break
                        }
-                       cc := v_0.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64ULE
-                       b.SetControl(cc)
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
+               // match: (ULE (FlagGT_UGT) yes no)
                // cond:
-               // result: (GT cc yes no)
+               // result: (First nil no yes)
                for {
                        v := b.Control
-                       if v.Op != OpARM64CMPconst {
-                               break
-                       }
-                       if v.AuxInt != 0 {
+                       if v.Op != OpARM64FlagGT_UGT {
                                break
                        }
-                       v_0 := v.Args[0]
-                       if v_0.Op != OpARM64GreaterThan {
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (ULE (InvertFlags cmp) yes no)
+               // cond:
+               // result: (UGE cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64InvertFlags {
                                break
                        }
-                       cc := v_0.Args[0]
+                       cmp := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64GT
-                       b.SetControl(cc)
+                       b.Kind = BlockARM64UGE
+                       b.SetControl(cmp)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
+       case BlockARM64ULT:
+               // match: (ULT (FlagEQ) yes no)
                // cond:
-               // result: (UGT cc yes no)
+               // result: (First nil no yes)
                for {
                        v := b.Control
-                       if v.Op != OpARM64CMPconst {
-                               break
-                       }
-                       if v.AuxInt != 0 {
+                       if v.Op != OpARM64FlagEQ {
                                break
                        }
-                       v_0 := v.Args[0]
-                       if v_0.Op != OpARM64GreaterThanU {
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (ULT (FlagLT_ULT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagLT_ULT {
                                break
                        }
-                       cc := v_0.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64UGT
-                       b.SetControl(cc)
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
+               // match: (ULT (FlagLT_UGT) yes no)
                // cond:
-               // result: (GE cc yes no)
+               // result: (First nil no yes)
                for {
                        v := b.Control
-                       if v.Op != OpARM64CMPconst {
-                               break
-                       }
-                       if v.AuxInt != 0 {
+                       if v.Op != OpARM64FlagLT_UGT {
                                break
                        }
-                       v_0 := v.Args[0]
-                       if v_0.Op != OpARM64GreaterEqual {
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (ULT (FlagGT_ULT) yes no)
+               // cond:
+               // result: (First nil yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64FlagGT_ULT {
                                break
                        }
-                       cc := v_0.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64GE
-                       b.SetControl(cc)
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
                        _ = yes
                        _ = no
                        return true
                }
-               // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
+               // match: (ULT (FlagGT_UGT) yes no)
                // cond:
-               // result: (UGE cc yes no)
+               // result: (First nil no yes)
                for {
                        v := b.Control
-                       if v.Op != OpARM64CMPconst {
-                               break
-                       }
-                       if v.AuxInt != 0 {
+                       if v.Op != OpARM64FlagGT_UGT {
                                break
                        }
-                       v_0 := v.Args[0]
-                       if v_0.Op != OpARM64GreaterEqualU {
+                       yes := b.Succs[0]
+                       no := b.Succs[1]
+                       b.Kind = BlockFirst
+                       b.SetControl(nil)
+                       b.swapSuccessors()
+                       _ = no
+                       _ = yes
+                       return true
+               }
+               // match: (ULT (InvertFlags cmp) yes no)
+               // cond:
+               // result: (UGT cmp yes no)
+               for {
+                       v := b.Control
+                       if v.Op != OpARM64InvertFlags {
                                break
                        }
-                       cc := v_0.Args[0]
+                       cmp := v.Args[0]
                        yes := b.Succs[0]
                        no := b.Succs[1]
-                       b.Kind = BlockARM64UGE
-                       b.SetControl(cc)
+                       b.Kind = BlockARM64UGT
+                       b.SetControl(cmp)
                        _ = yes
                        _ = no
                        return true