var _ = math.MinInt8 // in case not otherwise used
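// rewriteValueARM dispatches on v.Op and applies any matching ARM-specific
// rewrite rule to v, reporting whether a rewrite was made.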
func rewriteValueARM(v *Value, config *Config) bool {
switch v.Op {
+ case OpARMADC:
+ return rewriteValueARM_OpARMADC(v, config)
+ case OpARMADCconst:
+ return rewriteValueARM_OpARMADCconst(v, config)
case OpARMADD:
return rewriteValueARM_OpARMADD(v, config)
+ case OpARMADDS:
+ return rewriteValueARM_OpARMADDS(v, config)
case OpARMADDconst:
return rewriteValueARM_OpARMADDconst(v, config)
+ case OpARMAND:
+ return rewriteValueARM_OpARMAND(v, config)
+ case OpARMANDconst:
+ return rewriteValueARM_OpARMANDconst(v, config)
case OpAdd16:
return rewriteValueARM_OpAdd16(v, config)
case OpAdd32:
		return rewriteValueARM_OpAdd32(v, config)
case OpAndB:
return rewriteValueARM_OpAndB(v, config)
+ case OpARMBIC:
+ return rewriteValueARM_OpARMBIC(v, config)
+ case OpARMBICconst:
+ return rewriteValueARM_OpARMBICconst(v, config)
+ case OpARMCMP:
+ return rewriteValueARM_OpARMCMP(v, config)
+ case OpARMCMPconst:
+ return rewriteValueARM_OpARMCMPconst(v, config)
case OpClosureCall:
return rewriteValueARM_OpClosureCall(v, config)
case OpCom16:
		return rewriteValueARM_OpCom16(v, config)
case OpCvt64Fto32U:
return rewriteValueARM_OpCvt64Fto32U(v, config)
+ case OpARMDIV:
+ return rewriteValueARM_OpARMDIV(v, config)
+ case OpARMDIVU:
+ return rewriteValueARM_OpARMDIVU(v, config)
case OpDeferCall:
return rewriteValueARM_OpDeferCall(v, config)
case OpDiv16:
		return rewriteValueARM_OpDiv16(v, config)
case OpEqPtr:
return rewriteValueARM_OpEqPtr(v, config)
+ case OpARMEqual:
+ return rewriteValueARM_OpARMEqual(v, config)
case OpGeq16:
return rewriteValueARM_OpGeq16(v, config)
case OpGeq16U:
		return rewriteValueARM_OpGeq16U(v, config)
case OpGreater8U:
return rewriteValueARM_OpGreater8U(v, config)
+ case OpARMGreaterEqual:
+ return rewriteValueARM_OpARMGreaterEqual(v, config)
+ case OpARMGreaterEqualU:
+ return rewriteValueARM_OpARMGreaterEqualU(v, config)
+ case OpARMGreaterThan:
+ return rewriteValueARM_OpARMGreaterThan(v, config)
+ case OpARMGreaterThanU:
+ return rewriteValueARM_OpARMGreaterThanU(v, config)
case OpHmul16:
return rewriteValueARM_OpHmul16(v, config)
case OpHmul16u:
		return rewriteValueARM_OpHmul16u(v, config)
case OpLess8U:
return rewriteValueARM_OpLess8U(v, config)
+ case OpARMLessEqual:
+ return rewriteValueARM_OpARMLessEqual(v, config)
+ case OpARMLessEqualU:
+ return rewriteValueARM_OpARMLessEqualU(v, config)
+ case OpARMLessThan:
+ return rewriteValueARM_OpARMLessThan(v, config)
+ case OpARMLessThanU:
+ return rewriteValueARM_OpARMLessThanU(v, config)
case OpLoad:
return rewriteValueARM_OpLoad(v, config)
+ case OpARMLoweredZeromask:
+ return rewriteValueARM_OpARMLoweredZeromask(v, config)
case OpLrot16:
return rewriteValueARM_OpLrot16(v, config)
case OpLrot32:
		return rewriteValueARM_OpLrot32(v, config)
case OpARMMOVBUload:
return rewriteValueARM_OpARMMOVBUload(v, config)
+ case OpARMMOVBUreg:
+ return rewriteValueARM_OpARMMOVBUreg(v, config)
case OpARMMOVBload:
return rewriteValueARM_OpARMMOVBload(v, config)
+ case OpARMMOVBreg:
+ return rewriteValueARM_OpARMMOVBreg(v, config)
case OpARMMOVBstore:
return rewriteValueARM_OpARMMOVBstore(v, config)
case OpARMMOVDload:
		return rewriteValueARM_OpARMMOVDload(v, config)
case OpARMMOVHUload:
return rewriteValueARM_OpARMMOVHUload(v, config)
+ case OpARMMOVHUreg:
+ return rewriteValueARM_OpARMMOVHUreg(v, config)
case OpARMMOVHload:
return rewriteValueARM_OpARMMOVHload(v, config)
+ case OpARMMOVHreg:
+ return rewriteValueARM_OpARMMOVHreg(v, config)
case OpARMMOVHstore:
return rewriteValueARM_OpARMMOVHstore(v, config)
case OpARMMOVWload:
return rewriteValueARM_OpARMMOVWload(v, config)
case OpARMMOVWstore:
return rewriteValueARM_OpARMMOVWstore(v, config)
+ case OpARMMUL:
+ return rewriteValueARM_OpARMMUL(v, config)
+ case OpARMMULA:
+ return rewriteValueARM_OpARMMULA(v, config)
+ case OpARMMVN:
+ return rewriteValueARM_OpARMMVN(v, config)
case OpMod16:
return rewriteValueARM_OpMod16(v, config)
case OpMod16u:
		return rewriteValueARM_OpMod16u(v, config)
case OpNot:
return rewriteValueARM_OpNot(v, config)
+ case OpARMNotEqual:
+ return rewriteValueARM_OpARMNotEqual(v, config)
+ case OpARMOR:
+ return rewriteValueARM_OpARMOR(v, config)
+ case OpARMORconst:
+ return rewriteValueARM_OpARMORconst(v, config)
case OpOffPtr:
return rewriteValueARM_OpOffPtr(v, config)
case OpOr16:
		return rewriteValueARM_OpOr16(v, config)
case OpOrB:
return rewriteValueARM_OpOrB(v, config)
+ case OpARMRSB:
+ return rewriteValueARM_OpARMRSB(v, config)
+ case OpARMRSBconst:
+ return rewriteValueARM_OpARMRSBconst(v, config)
+ case OpARMRSCconst:
+ return rewriteValueARM_OpARMRSCconst(v, config)
case OpRsh16Ux16:
return rewriteValueARM_OpRsh16Ux16(v, config)
case OpRsh16Ux32:
		return rewriteValueARM_OpRsh16Ux32(v, config)
case OpRsh8x8:
return rewriteValueARM_OpRsh8x8(v, config)
+ case OpARMSBC:
+ return rewriteValueARM_OpARMSBC(v, config)
+ case OpARMSBCconst:
+ return rewriteValueARM_OpARMSBCconst(v, config)
+ case OpARMSLL:
+ return rewriteValueARM_OpARMSLL(v, config)
+ case OpARMSLLconst:
+ return rewriteValueARM_OpARMSLLconst(v, config)
+ case OpARMSRA:
+ return rewriteValueARM_OpARMSRA(v, config)
+ case OpARMSRAconst:
+ return rewriteValueARM_OpARMSRAconst(v, config)
+ case OpARMSRL:
+ return rewriteValueARM_OpARMSRL(v, config)
+ case OpARMSRLconst:
+ return rewriteValueARM_OpARMSRLconst(v, config)
+ case OpARMSUB:
+ return rewriteValueARM_OpARMSUB(v, config)
+ case OpARMSUBS:
+ return rewriteValueARM_OpARMSUBS(v, config)
+ case OpARMSUBconst:
+ return rewriteValueARM_OpARMSUBconst(v, config)
case OpSelect0:
return rewriteValueARM_OpSelect0(v, config)
case OpSelect1:
		return rewriteValueARM_OpSelect1(v, config)
case OpTrunc32to8:
return rewriteValueARM_OpTrunc32to8(v, config)
+ case OpARMXOR:
+ return rewriteValueARM_OpARMXOR(v, config)
+ case OpARMXORconst:
+ return rewriteValueARM_OpARMXORconst(v, config)
case OpXor16:
return rewriteValueARM_OpXor16(v, config)
	case OpXor32:
		return rewriteValueARM_OpXor32(v, config)
}
return false
}
+func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADC (MOVWconst [c]) x flags)
+ // cond:
+ // result: (ADCconst [c] x flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (MOVWconst [c]) flags)
+ // cond:
+ // result: (ADCconst [c] x flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCconst [c] (ADDconst [d] x) flags)
+ // cond:
+ // result: (ADCconst [int64(int32(c+d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMADCconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCconst [c] (SUBconst [d] x) flags)
+ // cond:
+ // result: (ADCconst [int64(int32(c-d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMADCconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
b := v.Block
_ = b
v.AddArg(x)
return true
}
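+	// (RSBconst [0] y) computes 0-y, the negation of y, so adding it to x
+	// is just x-y.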
+ // match: (ADD x (RSBconst [0] y))
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMRSBconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
// match: (ADD (MUL x y) a)
// cond:
// result: (MULA x y a)
}
return false
}
+func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDS (MOVWconst [c]) x)
+ // cond:
+ // result: (ADDSconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDS x (MOVWconst [c]))
+ // cond:
+ // result: (ADDSconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueARM_OpARMADDconst(v *Value, config *Config) bool {
b := v.Block
_ = b
v.AddArg(ptr)
return true
}
+ // match: (ADDconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(c+d))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(c + d))
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(c-d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ return true
+ }
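+	// RSBconst [d] x computes d-x, so adding c yields (c+d)-x, which is again
+	// a reverse subtraction by a constant.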
+ // match: (ADDconst [c] (RSBconst [d] x))
+ // cond:
+ // result: (RSBconst [int64(int32(c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AND (MOVWconst [c]) x)
+ // cond:
+ // result: (ANDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (AND x (MOVWconst [c]))
+ // cond:
+ // result: (ANDconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (AND x x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
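+	// MVN is bitwise NOT, so x AND (NOT y) maps directly onto ARM's BIC
+	// (bit clear) instruction.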
+ // match: (AND x (MVN y))
+ // cond:
+ // result: (BIC x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMVN {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpARMBIC)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDconst [0] _)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (ANDconst [c] x)
+ // cond: int32(c)==-1
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c&d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c & d
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // cond:
+ // result: (ANDconst [c&d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c & d
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
return true
}
}
-func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (ClosureCall [argwid] entry closure mem)
+ // match: (BIC x (MOVWconst [c]))
// cond:
- // result: (CALLclosure [argwid] entry closure mem)
+ // result: (BICconst [c] x)
for {
- argwid := v.AuxInt
- entry := v.Args[0]
- closure := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMCALLclosure)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(closure)
- v.AddArg(mem)
- return true
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMBICconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (BIC x x)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVWconst [0])
+ for {
+ c := v.AuxInt
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (BICconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [d&^c])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = d &^ c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMP x (MOVWconst [c]))
+ // cond:
+ // result: (CMPconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMCMPconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
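+	// CMPconst takes its constant as the second operand, so when the constant
+	// appears on the left we compare the swapped operands and mark the result
+	// with InvertFlags.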
+ // match: (CMP (MOVWconst [c]) x)
+ // cond:
+ // result: (InvertFlags (CMPconst [c] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
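+	// Comparing two constants folds to one of the five concrete flag values,
+	// one for each combination of signed and unsigned ordering.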
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ y := v.AuxInt
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpARMFlagEQ)
+ return true
+ }
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
+ // result: (FlagLT_ULT)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ y := v.AuxInt
+ if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpARMFlagLT_ULT)
+ return true
+ }
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
+ // result: (FlagLT_UGT)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ y := v.AuxInt
+ if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpARMFlagLT_UGT)
+ return true
+ }
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
+ // result: (FlagGT_ULT)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ y := v.AuxInt
+ if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpARMFlagGT_ULT)
+ return true
+ }
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
+ // result: (FlagGT_UGT)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ y := v.AuxInt
+ if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpARMFlagGT_UGT)
+ return true
+ }
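+	// A zero-extended byte is at most 0xff, so against a larger constant the
+	// comparison is always "less than", both signed and unsigned; the
+	// half-word rule below follows the same reasoning with 0xffff.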
+ // match: (CMPconst (MOVBUreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagLT_ULT)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVBUreg {
+ break
+ }
+ c := v.AuxInt
+ if !(0xff < c) {
+ break
+ }
+ v.reset(OpARMFlagLT_ULT)
+ return true
+ }
+ // match: (CMPconst (MOVHUreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagLT_ULT)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVHUreg {
+ break
+ }
+ c := v.AuxInt
+ if !(0xffff < c) {
+ break
+ }
+ v.reset(OpARMFlagLT_ULT)
+ return true
+ }
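+	// ANDing with a non-negative mask m yields a value in [0, m], so the
+	// result is known to be below n whenever m < n.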
+ // match: (CMPconst (ANDconst _ [m]) [n])
+ // cond: 0 <= int32(m) && int32(m) < int32(n)
+ // result: (FlagLT_ULT)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ m := v_0.AuxInt
+ n := v.AuxInt
+ if !(0 <= int32(m) && int32(m) < int32(n)) {
+ break
+ }
+ v.reset(OpARMFlagLT_ULT)
+ return true
+ }
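+	// A logical right shift by c leaves at most 32-c significant bits, so the
+	// result is below 1<<(32-c); when that bound does not exceed n, the
+	// comparison is again a known "less than".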
+ // match: (CMPconst (SRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)
+ // result: (FlagLT_ULT)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ n := v.AuxInt
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) {
+ break
+ }
+ v.reset(OpARMFlagLT_ULT)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMCALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
}
}
func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
return true
}
}
+func rewriteValueARM_OpARMDIV(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DIV (MOVWconst [c]) (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(c)/int32(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(c) / int32(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMDIVU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DIVU x (MOVWconst [1]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
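+	// Unsigned division by a power of two is a logical right shift by its
+	// base-2 logarithm.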
+ // match: (DIVU x (MOVWconst [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (SRLconst [log2(c)] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (DIVU (MOVWconst [c]) (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(uint32(c)/uint32(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(uint32(c) / uint32(d))
+ return true
+ }
+ return false
+}
func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
b := v.Block
_ = b
return true
}
}
-func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Geq16 x y)
+ // match: (Equal (FlagEQ))
// cond:
- // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ // result: (MOVWconst [1])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (Equal (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
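+	// InvertFlags records that the comparison operands were swapped; equality
+	// is symmetric, so the predicate is unchanged.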
+ // match: (Equal (InvertFlags x))
+ // cond:
+ // result: (Equal x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMGreaterEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Hmul16 x y)
+ // match: (GreaterEqual (FlagEQ))
// cond:
- // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+ // result: (MOVWconst [1])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 16
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
-}
-func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16u x y)
+ // match: (GreaterEqual (FlagLT_ULT))
// cond:
- // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+ // result: (MOVWconst [0])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 16
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
-}
-func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32 x y)
+ // match: (GreaterEqual (FlagLT_UGT))
// cond:
- // result: (HMUL x y)
+ // result: (MOVWconst [0])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMHMUL)
- v.AddArg(x)
- v.AddArg(y)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
-}
-func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32u x y)
+ // match: (GreaterEqual (FlagGT_ULT))
// cond:
- // result: (HMULU x y)
+ // result: (MOVWconst [1])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMHMULU)
- v.AddArg(x)
- v.AddArg(y)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
-}
-func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8 x y)
+ // match: (GreaterEqual (FlagGT_UGT))
// cond:
- // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+ // result: (MOVWconst [1])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 8
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
-}
-func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8u x y)
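+	// Swapping the compared operands (InvertFlags) mirrors the predicate:
+	// x >= y is y <= x, so this becomes LessEqual of the original flags.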
+ // match: (GreaterEqual (InvertFlags x))
// cond:
- // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+ // result: (LessEqual x)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 8
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessEqual)
+ v.AddArg(x)
return true
}
+ return false
}
-func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMGreaterEqualU(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (InterCall [argwid] entry mem)
+ // match: (GreaterEqualU (FlagEQ))
// cond:
- // result: (CALLinter [argwid] entry mem)
+ // result: (MOVWconst [1])
for {
- argwid := v.AuxInt
- entry := v.Args[0]
- mem := v.Args[1]
- v.reset(OpARMCALLinter)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(mem)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
-}
-func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsInBounds idx len)
+ // match: (GreaterEqualU (FlagLT_ULT))
// cond:
- // result: (LessThanU (CMP idx len))
+ // result: (MOVWconst [0])
for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
-}
-func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsNonNil ptr)
+ // match: (GreaterEqualU (FlagLT_UGT))
// cond:
- // result: (NotEqual (CMPconst [0] ptr))
+ // result: (MOVWconst [1])
for {
- ptr := v.Args[0]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v0.AuxInt = 0
- v0.AddArg(ptr)
- v.AddArg(v0)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
-}
-func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsSliceInBounds idx len)
+ // match: (GreaterEqualU (FlagGT_ULT))
// cond:
- // result: (LessEqualU (CMP idx len))
+ // result: (MOVWconst [0])
for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterEqualU (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqualU (InvertFlags x))
+ // cond:
+ // result: (LessEqualU x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessEqualU)
+ v.AddArg(x)
return true
}
+ return false
}
-func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMGreaterThan(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Leq16 x y)
+ // match: (GreaterThan (FlagEQ))
// cond:
- // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ // result: (MOVWconst [0])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThan (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThan (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThan (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThan (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // cond:
+ // result: (LessThan x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessThan)
+ v.AddArg(x)
return true
}
+ return false
}
-func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMGreaterThanU(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Leq16U x y)
+ // match: (GreaterThanU (FlagEQ))
// cond:
- // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ // result: (MOVWconst [0])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThanU (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThanU (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThanU (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThanU (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThanU (InvertFlags x))
+ // cond:
+ // result: (LessThanU x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessThanU)
+ v.AddArg(x)
return true
}
+ return false
}
-func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
+func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Leq32 x y)
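+	// The high half of a 16x16-bit signed multiply: sign-extend both operands
+	// to 32 bits, multiply, then arithmetic-shift the product right by 16.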
+ // match: (Hmul16 x y)
// cond:
- // result: (LessEqual (CMP x y))
+ // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
v.AddArg(v0)
+ v.AuxInt = 16
return true
}
}
-func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Leq32F x y)
+ // match: (Hmul16u x y)
// cond:
- // result: (GreaterEqual (CMPF y x))
+ // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
+ v.reset(OpARMSRLconst)
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
v.AddArg(v0)
+ v.AuxInt = 16
return true
}
}
-func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
+func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Leq32U x y)
+ // match: (Hmul32 x y)
// cond:
- // result: (LessEqualU (CMP x y))
+ // result: (HMUL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v.reset(OpARMHMUL)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Leq64F x y)
+ // match: (Hmul32u x y)
// cond:
- // result: (GreaterEqual (CMPD y x))
+ // result: (HMULU x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
+ v.reset(OpARMHMULU)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
+func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Leq8 x y)
+ // match: (Hmul8 x y)
// cond:
- // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
		v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
+ v.AuxInt = 8
return true
}
}
-func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
+func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Leq8U x y)
+ // match: (Hmul8u x y)
// cond:
- // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v.reset(OpARMSRLconst)
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
		v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
+ v.AuxInt = 8
return true
}
}
-func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
+func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMCALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (LessThanU (CMP idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil ptr)
+ // cond:
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v.Args[0]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (LessEqualU (CMP idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (GreaterEqual (CMPF y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (GreaterEqual (CMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Less16 x y)
return true
}
}
-func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMLessEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Load <t> ptr mem)
- // cond: t.IsBoolean()
- // result: (MOVBUload ptr mem)
+ // match: (LessEqual (FlagEQ))
+ // cond:
+ // result: (MOVWconst [1])
for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(t.IsBoolean()) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
break
}
- v.reset(OpARMMOVBUload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
- // match: (Load <t> ptr mem)
- // cond: (is8BitInt(t) && isSigned(t))
- // result: (MOVBload ptr mem)
+ // match: (LessEqual (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is8BitInt(t) && isSigned(t)) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
break
}
- v.reset(OpARMMOVBload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
- // match: (Load <t> ptr mem)
- // cond: (is8BitInt(t) && !isSigned(t))
- // result: (MOVBUload ptr mem)
+ // match: (LessEqual (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is8BitInt(t) && !isSigned(t)) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
break
}
- v.reset(OpARMMOVBUload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
- // match: (Load <t> ptr mem)
- // cond: (is16BitInt(t) && isSigned(t))
- // result: (MOVHload ptr mem)
+ // match: (LessEqual (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t) && isSigned(t)) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
break
}
- v.reset(OpARMMOVHload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
- // match: (Load <t> ptr mem)
- // cond: (is16BitInt(t) && !isSigned(t))
- // result: (MOVHUload ptr mem)
+ // match: (LessEqual (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t) && !isSigned(t)) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
break
}
- v.reset(OpARMMOVHUload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
- // match: (Load <t> ptr mem)
- // cond: (is32BitInt(t) || isPtr(t))
- // result: (MOVWload ptr mem)
+ // match: (LessEqual (InvertFlags x))
+ // cond:
+ // result: (GreaterEqual x)
for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitInt(t) || isPtr(t)) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
break
}
- v.reset(OpARMMOVWload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterEqual)
+ v.AddArg(x)
return true
}
- // match: (Load <t> ptr mem)
- // cond: is32BitFloat(t)
- // result: (MOVFload ptr mem)
+ return false
+}
+func rewriteValueARM_OpARMLessEqualU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LessEqualU (FlagEQ))
+ // cond:
+ // result: (MOVWconst [1])
for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitFloat(t)) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
break
}
- v.reset(OpARMMOVFload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
- // match: (Load <t> ptr mem)
- // cond: is64BitFloat(t)
- // result: (MOVDload ptr mem)
+ // match: (LessEqualU (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is64BitFloat(t)) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
break
}
- v.reset(OpARMMOVDload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
- return false
-}
-func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot16 <t> x [c])
+ // match: (LessEqualU (FlagLT_UGT))
// cond:
- // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
+ // result: (MOVWconst [0])
for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpARMOR)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
- v0.AddArg(x)
- v0.AuxInt = c & 15
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
- v1.AddArg(x)
- v1.AuxInt = 16 - c&15
- v.AddArg(v1)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
-}
-func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot32 x [c])
+ // match: (LessEqualU (FlagGT_ULT))
// cond:
- // result: (SRRconst x [32-c&31])
+ // result: (MOVWconst [1])
for {
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpARMSRRconst)
- v.AddArg(x)
- v.AuxInt = 32 - c&31
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
-}
-func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot8 <t> x [c])
+ // match: (LessEqualU (FlagGT_UGT))
// cond:
- // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
+ // result: (MOVWconst [0])
for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpARMOR)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
- v0.AddArg(x)
- v0.AuxInt = c & 7
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
- v1.AddArg(x)
- v1.AuxInt = 8 - c&7
- v.AddArg(v1)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
-}
-func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x16 x y)
+ // match: (LessEqualU (InvertFlags x))
// cond:
- // result: (SLL x (ZeroExt16to32 y))
+ // result: (GreaterEqualU x)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterEqualU)
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
return true
}
+ return false
}
-func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMLessThan(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Lsh16x32 x y)
+ // match: (LessThan (FlagEQ))
// cond:
- // result: (SLL x y)
+ // result: (MOVWconst [0])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v.AddArg(y)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
-}
-func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SLLconst x [c])
+ // match: (LessThan (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
break
}
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessThan (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
break
}
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
- // match: (Lsh16x64 _ (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (Const16 [0])
+ // match: (LessThan (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
break
}
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThan (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
break
}
- v.reset(OpConst16)
+ v.reset(OpARMMOVWconst)
v.AuxInt = 0
return true
}
- return false
-}
-func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x8 x y)
+ // match: (LessThan (InvertFlags x))
// cond:
- // result: (SLL x (ZeroExt8to32 y))
+ // result: (GreaterThan x)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterThan)
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
return true
}
+ return false
}
-func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMLessThanU(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Lsh32x16 x y)
+ // match: (LessThanU (FlagEQ))
// cond:
- // result: (SLL x (ZeroExt16to32 y))
+ // result: (MOVWconst [0])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
-}
-func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x32 x y)
+ // match: (LessThanU (FlagLT_ULT))
// cond:
- // result: (SLL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SLLconst x [c])
+ // result: (MOVWconst [1])
for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
break
}
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
- // match: (Lsh32x64 _ (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (Const32 [0])
+ // match: (LessThanU (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
break
}
- v.reset(OpConst32)
+ v.reset(OpARMMOVWconst)
v.AuxInt = 0
return true
}
- return false
-}
-func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x8 x y)
- // cond:
- // result: (SLL x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x16 x y)
- // cond:
- // result: (SLL x (ZeroExt16to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x32 x y)
+ // match: (LessThanU (FlagGT_ULT))
// cond:
- // result: (SLL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SLLconst x [c])
+ // result: (MOVWconst [1])
for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
break
}
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
- // match: (Lsh8x64 _ (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (Const8 [0])
+ // match: (LessThanU (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
break
}
- v.reset(OpConst8)
+ v.reset(OpARMMOVWconst)
v.AuxInt = 0
return true
}
- return false
-}
-func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x8 x y)
+ // match: (LessThanU (InvertFlags x))
// cond:
- // result: (SLL x (ZeroExt8to32 y))
+ // result: (GreaterThanU x)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterThanU)
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
return true
}
+ return false
}
-func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
+func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVBUload [off1+off2] {sym} ptr mem)
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean()) {
break
}
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
v.reset(OpARMMOVBUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
+ t := v.Type
+ ptr := v.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is8BitInt(t) && isSigned(t)) {
break
}
- v.reset(OpARMMOVBUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.reset(OpARMMOVBload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVBload [off1+off2] {sym} ptr mem)
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && !isSigned(t)) {
break
}
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARMMOVBload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.reset(OpARMMOVBUload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
+ t := v.Type
+ ptr := v.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is16BitInt(t) && isSigned(t)) {
break
}
- v.reset(OpARMMOVBload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.reset(OpARMMOVHload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && !isSigned(t)) {
break
}
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMMOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.reset(OpARMMOVHUload)
v.AddArg(ptr)
- v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVWload ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) || isPtr(t)) {
break
}
- v.reset(OpARMMOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.reset(OpARMMOVWload)
v.AddArg(ptr)
- v.AddArg(val)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueARM_OpARMMOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVDload [off1+off2] {sym} ptr mem)
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
break
}
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARMMOVDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.reset(OpARMMOVFload)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
+ t := v.Type
+ ptr := v.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is64BitFloat(t)) {
break
}
v.reset(OpARMMOVDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
-func rewriteValueARM_OpARMMOVDstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMLoweredZeromask(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // match: (LoweredZeromask (MOVWconst [0]))
// cond:
- // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ // result: (MOVWconst [0])
for {
- off1 := v.AuxInt
- sym := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
+ if v_0.Op != OpARMMOVWconst {
break
}
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMMOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ if v_0.AuxInt != 0 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
- // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // match: (LoweredZeromask (MOVWconst [c]))
+ // cond: c != 0
+ // result: (MOVWconst [0xffffffff])
for {
- off1 := v.AuxInt
- sym1 := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWaddr {
+ if v_0.Op != OpARMMOVWconst {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ c := v_0.AuxInt
+ if !(c != 0) {
break
}
- v.reset(OpARMMOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0xffffffff
return true
}
return false
}
-func rewriteValueARM_OpARMMOVFload(v *Value, config *Config) bool {
+func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // match: (Lrot16 <t> x [c])
// cond:
- // result: (MOVFload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARMMOVFload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARMMOVFload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ t := v.Type
+ x := v.Args[0]
+ c := v.AuxInt
+ v.reset(OpARMOR)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AddArg(x)
+ v0.AuxInt = c & 15
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+ v1.AddArg(x)
+ v1.AuxInt = 16 - c&15
+ v.AddArg(v1)
return true
}
- return false
}
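// Illustrative note (derived from the rule above, not generated by rulegen):
// Lrot16 has no single ARM instruction, so the rotate is built from two shifts;
// for c=3 the result computes (x<<3) | (x>>13), whose low 16 bits carry the
// rotated value.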
-func rewriteValueARM_OpARMMOVFstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // match: (Lrot32 x [c])
// cond:
- // result: (MOVFstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMMOVFstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // result: (SRRconst x [32-c&31])
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARMMOVFstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ x := v.Args[0]
+ c := v.AuxInt
+ v.reset(OpARMSRRconst)
+ v.AddArg(x)
+ v.AuxInt = 32 - c&31
return true
}
- return false
}
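// Illustrative note: ARM has a rotate-right shifter operand but no rotate-left,
// so a 32-bit left rotate by c is emitted as a right rotate by 32-c&31; e.g.
// Lrot32 x [8] lowers to (SRRconst x [24]).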
-func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool {
+func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // match: (Lrot8 <t> x [c])
// cond:
- // result: (MOVHUload [off1+off2] {sym} ptr mem)
+ // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARMMOVHUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ t := v.Type
+ x := v.Args[0]
+ c := v.AuxInt
+ v.reset(OpARMOR)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AddArg(x)
+ v0.AuxInt = c & 7
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+ v1.AddArg(x)
+ v1.AuxInt = 8 - c&7
+ v.AddArg(v1)
return true
}
- // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+}
+func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 x y)
+ // cond:
+ // result: (SLL x (ZeroExt16to32 y))
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARMMOVHUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- return false
}
-func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // match: (Lsh16x32 x y)
// cond:
- // result: (MOVHload [off1+off2] {sym} ptr mem)
+ // result: (SLL x y)
for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
break
}
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARMMOVHload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AddArg(x)
+ v.AuxInt = c
return true
}
- // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWaddr {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
break
}
- v.reset(OpARMMOVHload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.reset(OpConst16)
+ v.AuxInt = 0
return true
}
return false
}
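// Illustrative note: shifts by a 64-bit constant fold at compile time; an
// in-range count becomes SLLconst, while a count of 16 or more (e.g.
// Lsh16x64 x (Const64 [20])) is replaced by the zero constant Const16 [0].
// The 32-bit and 8-bit variants below follow the same pattern.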
-func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // match: (Lsh16x8 x y)
// cond:
- // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ // result: (SLL x (ZeroExt8to32 y))
for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMMOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+}
+func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 x y)
+ // cond:
+ // result: (SLL x (ZeroExt16to32 y))
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARMMOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- return false
}
-func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // match: (Lsh32x32 x y)
// cond:
- // result: (MOVWload [off1+off2] {sym} ptr mem)
+ // result: (SLL x y)
for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARMMOVWload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+}
+func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SLLconst x [c])
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWaddr {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
break
}
- v.reset(OpARMMOVWload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.reset(OpARMSLLconst)
+ v.AddArg(x)
+ v.AuxInt = c
return true
}
- return false
-}
-func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (MOVWstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMMOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWaddr {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
break
}
- v.reset(OpARMMOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.reset(OpConst32)
+ v.AuxInt = 0
return true
}
return false
}
-func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Mod16 x y)
+ // match: (Lsh32x8 x y)
// cond:
- // result: (MOD (SignExt16to32 x) (SignExt16to32 y))
+ // result: (SLL x (ZeroExt8to32 y))
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMMOD)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(y)
- v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Mod16u x y)
+ // match: (Lsh8x16 x y)
// cond:
- // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y))
+ // result: (SLL x (ZeroExt16to32 y))
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMMODU)
+ v.reset(OpARMSLL)
+ v.AddArg(x)
v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(x)
+ v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Mod32 x y)
+ // match: (Lsh8x32 x y)
// cond:
- // result: (MOD x y)
+ // result: (SLL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMMOD)
+ v.reset(OpARMSLL)
v.AddArg(x)
v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Mod32u x y)
- // cond:
- // result: (MODU x y)
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SLLconst x [c])
for {
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMODU)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSLLconst)
v.AddArg(x)
- v.AddArg(y)
+ v.AuxInt = c
return true
}
-}
-func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8 x y)
- // cond:
- // result: (MOD (SignExt8to32 x) (SignExt8to32 y))
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMOD)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(y)
- v.AddArg(v1)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
return true
}
+ return false
}
-func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Mod8u x y)
+ // match: (Lsh8x8 x y)
// cond:
- // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y))
+ // result: (SLL x (ZeroExt8to32 y))
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMMODU)
+ v.reset(OpARMSLL)
+ v.AddArg(x)
v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
+ v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpMove(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Move [s] _ _ mem)
- // cond: SizeAndAlign(s).Size() == 0
- // result: mem
+ // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBUload [off1+off2] {sym} ptr mem)
for {
- s := v.AuxInt
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 0) {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
break
}
- v.reset(OpCopy)
- v.Type = mem.Type
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 1
- // result: (MOVBstore dst (MOVBUload src mem) mem)
+ // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 1) {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
break
}
- v.reset(OpARMMOVBstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
- // result: (MOVHstore dst (MOVHUload src mem) mem)
+ // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
+ // result: x
for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVBstore {
break
}
- v.reset(OpARMMOVHstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
return true
}
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 2
- // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
+ return false
+}
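// Illustrative note: the last MOVBUload rule above is store-to-load forwarding;
// a byte load whose memory argument is a MOVBstore to the same symbol, offset,
// and pointer is replaced by the stored value, provided the stored value's type
// is unsigned so no extension is required.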
+func rewriteValueARM_OpARMMOVBUreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // cond:
+ // result: (MOVWreg x)
for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 2) {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBUload {
break
}
- v.reset(OpARMMOVBstore)
- v.AuxInt = 1
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v0.AuxInt = 1
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
- // result: (MOVWstore dst (MOVWload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
- break
- }
- v.reset(OpARMMOVWstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
return true
}
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
- // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ // match: (MOVBUreg (ANDconst [c] x))
+ // cond:
+ // result: (ANDconst [c&0xff] x)
for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMANDconst {
break
}
- v.reset(OpARMMOVHstore)
- v.AuxInt = 2
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
- v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c & 0xff
+ v.AddArg(x)
return true
}
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4
- // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
+ return false
+}
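// Illustrative note: MOVBUreg of a MOVBUload result is already zero-extended,
// so it reduces to a plain register move, and MOVBUreg of an ANDconst just
// narrows the mask to the low byte (c & 0xff).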
+func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4) {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
break
}
- v.reset(OpARMMOVBstore)
- v.AuxInt = 3
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v0.AuxInt = 3
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v1.AuxInt = 2
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v2.AuxInt = 2
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v3.AuxInt = 1
- v3.AddArg(dst)
- v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v4.AuxInt = 1
- v4.AddArg(src)
- v4.AddArg(mem)
- v3.AddArg(v4)
- v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v5.AddArg(dst)
- v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v6.AddArg(src)
- v6.AddArg(mem)
- v5.AddArg(v6)
- v5.AddArg(mem)
- v3.AddArg(v5)
- v1.AddArg(v3)
- v.AddArg(v1)
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 3
- // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
+ // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 3) {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
break
}
- v.reset(OpARMMOVBstore)
- v.AuxInt = 2
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v1.AuxInt = 1
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v2.AuxInt = 1
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v3.AddArg(dst)
- v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v4.AddArg(src)
- v4.AddArg(mem)
- v3.AddArg(v4)
- v3.AddArg(mem)
- v1.AddArg(v3)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0
- // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpARMDUFFCOPY)
- v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4))
- v.AddArg(dst)
- v.AddArg(src)
+ v.reset(OpARMMOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0
- // result: (LoweredMove dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
+ // result: x
for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVBstore {
break
}
- v.reset(OpARMLoweredMove)
- v.AddArg(dst)
- v.AddArg(src)
- v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
- v0.AddArg(src)
- v0.AuxInt = SizeAndAlign(s).Size()
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0
- // result: (LoweredMoveU dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0) {
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
break
}
- v.reset(OpARMLoweredMoveU)
- v.AddArg(dst)
- v.AddArg(src)
- v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
- v0.AddArg(src)
- v0.AuxInt = SizeAndAlign(s).Size()
- v.AddArg(v0)
- v.AddArg(mem)
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
return true
}
return false
}
-func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVBreg(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Mul16 x y)
+ // match: (MOVBreg x:(MOVBload _ _))
// cond:
- // result: (MUL x y)
+ // result: (MOVWreg x)
for {
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMUL)
+ if x.Op != OpARMMOVBload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
v.AddArg(x)
- v.AddArg(y)
return true
}
-}
-func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32 x y)
- // cond:
- // result: (MUL x y)
+ // match: (MOVBreg (ANDconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDconst [c&0x7f] x)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMUL)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = c & 0x7f
v.AddArg(x)
- v.AddArg(y)
return true
}
+ return false
}
-func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Mul32F x y)
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond:
- // result: (MULF x y)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMULF)
- v.AddArg(x)
- v.AddArg(y)
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32uhilo x y)
- // cond:
- // result: (MULLU x y)
+ // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMULLU)
- v.AddArg(x)
- v.AddArg(y)
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul64F x y)
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
// cond:
- // result: (MULD x y)
+ // result: (MOVBstore [off] {sym} ptr x mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMULD)
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul8 x y)
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
// cond:
- // result: (MUL x y)
+ // result: (MOVBstore [off] {sym} ptr x mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMUL)
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg16 x)
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
// cond:
- // result: (RSBconst [0] x)
+ // result: (MOVBstore [off] {sym} ptr x mem)
for {
- x := v.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = 0
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32 x)
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
// cond:
- // result: (RSBconst [0] x)
+ // result: (MOVBstore [off] {sym} ptr x mem)
for {
- x := v.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = 0
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
+ v.AddArg(mem)
return true
}
+ return false
}
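// Illustrative note: MOVBstore writes only the low byte of val, so the rules
// above drop a redundant MOVBreg, MOVBUreg, MOVHreg, or MOVHUreg extension
// applied to the value being stored.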
-func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVDload(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Neg32F x)
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond:
- // result: (MULF (MOVFconst [int64(math.Float64bits(-1))]) x)
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
for {
- x := v.Args[0]
- v.reset(OpARMMULF)
- v0 := b.NewValue0(v.Line, OpARMMOVFconst, config.fe.TypeFloat32())
- v0.AuxInt = int64(math.Float64bits(-1))
- v.AddArg(v0)
- v.AddArg(x)
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg64F x)
- // cond:
- // result: (MULD (MOVDconst [int64(math.Float64bits(-1))]) x)
+ // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
- x := v.Args[0]
- v.reset(OpARMMULD)
- v0 := b.NewValue0(v.Line, OpARMMOVDconst, config.fe.TypeFloat64())
- v0.AuxInt = int64(math.Float64bits(-1))
- v.AddArg(v0)
- v.AddArg(x)
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg8 x)
- // cond:
- // result: (RSBconst [0] x)
+ // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
for {
- x := v.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = 0
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVDstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
return true
}
+ return false
}
-func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVDstore(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Neq16 x y)
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond:
- // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32 x y)
- // cond:
- // result: (NotEqual (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32F x y)
- // cond:
- // result: (NotEqual (CMPF x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq64F x y)
- // cond:
- // result: (NotEqual (CMPD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq8 x y)
- // cond:
- // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqB x y)
- // cond:
- // result: (XOR x y)
+ // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMXOR)
- v.AddArg(x)
- v.AddArg(y)
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
+ return false
}
-func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVFload(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (NeqPtr x y)
+ // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond:
- // result: (NotEqual (CMP x y))
+ // result: (MOVFload [off1+off2] {sym} ptr mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVFload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NilCheck ptr mem)
- // cond:
- // result: (LoweredNilCheck ptr mem)
+ // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
- ptr := v.Args[0]
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
mem := v.Args[1]
- v.reset(OpARMLoweredNilCheck)
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVFload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Not x)
- // cond:
- // result: (XORconst [1] x)
+ // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
for {
- x := v.Args[0]
- v.reset(OpARMXORconst)
- v.AuxInt = 1
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVFstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
return true
}
+ return false
}
-func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVFstore(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (OffPtr [off] ptr:(SP))
+ // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond:
- // result: (MOVWaddr [off] ptr)
+ // result: (MOVFstore [off1+off2] {sym} ptr val mem)
for {
- off := v.AuxInt
- ptr := v.Args[0]
- if ptr.Op != OpSP {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
break
}
- v.reset(OpARMMOVWaddr)
- v.AuxInt = off
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
- // match: (OffPtr [off] ptr)
- // cond:
- // result: (ADDconst [off] ptr)
+ // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
- off := v.AuxInt
- ptr := v.Args[0]
- v.reset(OpARMADDconst)
- v.AuxInt = off
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
+ return false
}
-func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Or16 x y)
+ // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond:
- // result: (OR x y)
+ // result: (MOVHUload [off1+off2] {sym} ptr mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
- v.AddArg(x)
- v.AddArg(y)
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or32 x y)
- // cond:
- // result: (OR x y)
+ // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVHstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
- v.AddArg(y)
return true
}
+ return false
}
-func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVHUreg(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Or8 x y)
+ // match: (MOVHUreg x:(MOVBUload _ _))
// cond:
- // result: (OR x y)
+ // result: (MOVWreg x)
for {
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
v.AddArg(x)
- v.AddArg(y)
return true
}
-}
-func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OrB x y)
+ // match: (MOVHUreg x:(MOVHUload _ _))
// cond:
- // result: (OR x y)
+ // result: (MOVWreg x)
for {
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
+ if x.Op != OpARMMOVHUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
v.AddArg(x)
- v.AddArg(y)
return true
}
-}
-func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux16 x y)
+ // match: (MOVHUreg (ANDconst [c] x))
// cond:
- // result: (SRL (ZeroExt16to32 x) (ZeroExt16to32 y))
+ // result: (ANDconst [c&0xffff] x)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c & 0xffff
+ v.AddArg(x)
return true
}
+ return false
}
-func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Rsh16Ux32 x y)
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond:
- // result: (SRL (ZeroExt16to32 x) y)
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(y)
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
break
}
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 16
- v.AddArg(v0)
- v.AuxInt = c + 16
+ v.reset(OpARMMOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (Rsh16Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (Const16 [0])
+ // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
+ // result: x
for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
+ if v_1.Op != OpARMMOVHstore {
break
}
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
break
}
- v.reset(OpConst16)
- v.AuxInt = 0
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
return true
}
return false
}
-func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVHreg(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Rsh16Ux8 x y)
+ // match: (MOVHreg x:(MOVBload _ _))
// cond:
- // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+ // result: (MOVWreg x)
for {
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
+ if x.Op != OpARMMOVBload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
return true
}
-}
-func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x16 x y)
+ // match: (MOVHreg x:(MOVBUload _ _))
// cond:
- // result: (SRA (SignExt16to32 x) (ZeroExt16to32 y))
+ // result: (MOVWreg x)
for {
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
return true
}
-}
-func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x32 x y)
+ // match: (MOVHreg x:(MOVHload _ _))
// cond:
- // result: (SRA (SignExt16to32 x) y)
+ // result: (MOVWreg x)
for {
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(y)
+ if x.Op != OpARMMOVHload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
return true
}
-}
-func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ // match: (MOVHreg (ANDconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDconst [c&0x7fff] x)
for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMANDconst {
break
}
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
break
}
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 16
- v.AddArg(v0)
- v.AuxInt = c + 16
- return true
- }
- // match: (Rsh16x64 x (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 16
- v.AddArg(v0)
- v.AuxInt = 31
+ v.reset(OpARMANDconst)
+ v.AuxInt = c & 0x7fff
+ v.AddArg(x)
return true
}
return false
}
-func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x8 x y)
- // cond:
- // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux16 x y)
- // cond:
- // result: (SRL x (ZeroExt16to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Rsh32Ux32 x y)
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond:
- // result: (SRL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SRLconst x [c])
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
break
}
- v.reset(OpARMSRLconst)
- v.AddArg(x)
- v.AuxInt = c
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
- // match: (Rsh32Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (Const32 [0])
+ // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
break
}
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpConst32)
- v.AuxInt = 0
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux8 x y)
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
// cond:
- // result: (SRL x (ZeroExt8to32 y))
+ // result: (MOVHstore [off] {sym} ptr x mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x16 x y)
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
// cond:
- // result: (SRA x (ZeroExt16to32 y))
+ // result: (MOVHstore [off] {sym} ptr x mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg(mem)
return true
}
+ return false
}
-func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Rsh32x32 x y)
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond:
- // result: (SRA x y)
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v.AddArg(x)
- v.AddArg(y)
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SRAconst x [c])
+ // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
break
}
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpARMSRAconst)
- v.AddArg(x)
- v.AuxInt = c
+ v.reset(OpARMMOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (Rsh32x64 x (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (SRAconst x [31])
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
for {
- x := v.Args[0]
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
+ if v_1.Op != OpARMMOVWstore {
break
}
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
- v.reset(OpARMSRAconst)
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
- v.AuxInt = 31
return true
}
return false
}
-func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Rsh32x8 x y)
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond:
- // result: (SRA x (ZeroExt8to32 y))
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
+ return false
}
-func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
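+// The MUL rules strength-reduce multiplication by a constant operand:
+// -1 becomes a reverse subtract from zero, 0 and 1 fold away, a power
+// of two becomes SLLconst by log2(c), and two constants fold into a
+// single MOVWconst holding the 32-bit product. Both argument orders are
+// matched explicitly.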
+func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Rsh8Ux16 x y)
+ // match: (MUL x (MOVWconst [-1]))
// cond:
- // result: (SRL (ZeroExt8to32 x) (ZeroExt16to32 y))
+ // result: (RSBconst [0] x)
for {
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ if v_1.AuxInt != -1 {
+ break
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
return true
}
-}
-func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux32 x y)
+ // match: (MUL _ (MOVWconst [0]))
// cond:
- // result: (SRL (ZeroExt8to32 x) y)
+ // result: (MOVWconst [0])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(y)
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
-}
-func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ // match: (MUL x (MOVWconst [1]))
+ // cond:
+ // result: x
for {
x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
+ if v_1.Op != OpARMMOVWconst {
break
}
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
+ if v_1.AuxInt != 1 {
break
}
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 24
- v.AddArg(v0)
- v.AuxInt = c + 24
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
return true
}
- // match: (Rsh8Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (Const8 [0])
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (SLLconst [log2(c)] x)
for {
+ x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
+ if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- if !(uint64(c) >= 8) {
+ if !(isPowerOfTwo(c)) {
break
}
- v.reset(OpConst8)
- v.AuxInt = 0
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
return true
}
- return false
-}
-func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux8 x y)
+ // match: (MUL (MOVWconst [-1]) x)
// cond:
- // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+ // result: (RSBconst [0] x)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ if v_0.AuxInt != -1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
return true
}
-}
-func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x16 x y)
+ // match: (MUL (MOVWconst [0]) _)
// cond:
- // result: (SRA (SignExt8to32 x) (ZeroExt16to32 y))
+ // result: (MOVWconst [0])
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
-}
-func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x32 x y)
+ // match: (MUL (MOVWconst [1]) x)
// cond:
- // result: (SRA (SignExt8to32 x) y)
+ // result: x
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(y)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ if v_0.AuxInt != 1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
return true
}
-}
-func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: isPowerOfTwo(c)
+ // result: (SLLconst [log2(c)] x)
for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
break
}
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isPowerOfTwo(c)) {
break
}
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 24
- v.AddArg(v0)
- v.AuxInt = c + 24
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
return true
}
- // match: (Rsh8x64 x (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
+ // match: (MUL (MOVWconst [c]) (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(c*d))])
for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
break
}
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
break
}
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 24
- v.AddArg(v0)
- v.AuxInt = 31
+ d := v_1.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(c * d))
return true
}
return false
}
-func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
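+// MULA computes x*y+a; the rules below mirror the MUL simplifications
+// for a constant multiplier: -1 gives SUB a x, 0 leaves just a, 1 gives
+// ADD x a, a power of two gives ADD of a shifted x, and two constant
+// operands fold into an ADDconst applied to a.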
+func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Rsh8x8 x y)
+ // match: (MULA x (MOVWconst [-1]) a)
// cond:
- // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+ // result: (SUB a x)
for {
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ if v_1.AuxInt != -1 {
+ break
+ }
+ a := v.Args[2]
+ v.reset(OpARMSUB)
+ v.AddArg(a)
+ v.AddArg(x)
return true
}
-}
-func rewriteValueARM_OpSelect0(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Select0 <t> x)
- // cond: t.IsFlags()
- // result: (Carry x)
+ // match: (MULA _ (MOVWconst [0]) a)
+ // cond:
+ // result: a
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ a := v.Args[2]
+ v.reset(OpCopy)
+ v.Type = a.Type
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [1]) a)
+ // cond:
+ // result: (ADD x a)
for {
- t := v.Type
x := v.Args[0]
- if !(t.IsFlags()) {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
break
}
- v.reset(OpARMCarry)
+ if v_1.AuxInt != 1 {
+ break
+ }
+ a := v.Args[2]
+ v.reset(OpARMADD)
v.AddArg(x)
+ v.AddArg(a)
return true
}
- // match: (Select0 <t> x)
- // cond: !t.IsFlags()
- // result: (LoweredSelect0 x)
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
for {
- t := v.Type
x := v.Args[0]
- if !(!t.IsFlags()) {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
break
}
- v.reset(OpARMLoweredSelect0)
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [-1]) x a)
+ // cond:
+ // result: (SUB a x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ if v_0.AuxInt != -1 {
+ break
+ }
+ x := v.Args[1]
+ a := v.Args[2]
+ v.reset(OpARMSUB)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULA (MOVWconst [0]) _ a)
+ // cond:
+ // result: a
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ a := v.Args[2]
+ v.reset(OpCopy)
+ v.Type = a.Type
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [1]) x a)
+ // cond:
+ // result: (ADD x a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ if v_0.AuxInt != 1 {
+ break
+ }
+ x := v.Args[1]
+ a := v.Args[2]
+ v.reset(OpARMADD)
v.AddArg(x)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a)
+ // cond:
+ // result: (ADDconst [int64(int32(c*d))] a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_1.AuxInt
+ a := v.Args[2]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(c * d))
+ v.AddArg(a)
return true
}
return false
}
-func rewriteValueARM_OpSelect1(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Select1 x)
+ // match: (MVN (MOVWconst [c]))
// cond:
- // result: (LoweredSelect1 x)
+ // result: (MOVWconst [^c])
for {
- x := v.Args[0]
- v.reset(OpARMLoweredSelect1)
- v.AddArg(x)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = ^c
return true
}
+ return false
}
-func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
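+// The Mod ops lower to the ARM MOD/MODU ops, with 8- and 16-bit
+// operands first sign-extended (signed) or zero-extended (unsigned) to
+// 32 bits.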
+func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (SignExt16to32 x)
+ // match: (Mod16 x y)
// cond:
- // result: (MOVHreg x)
+ // result: (MOD (SignExt16to32 x) (SignExt16to32 y))
for {
x := v.Args[0]
- v.reset(OpARMMOVHreg)
- v.AddArg(x)
+ y := v.Args[1]
+ v.reset(OpARMMOD)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
+func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (SignExt8to16 x)
+ // match: (Mod16u x y)
// cond:
- // result: (MOVBreg x)
+ // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y))
for {
x := v.Args[0]
- v.reset(OpARMMOVBreg)
- v.AddArg(x)
+ y := v.Args[1]
+ v.reset(OpARMMODU)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
+func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (SignExt8to32 x)
+ // match: (Mod32 x y)
// cond:
- // result: (MOVBreg x)
+ // result: (MOD x y)
for {
x := v.Args[0]
- v.reset(OpARMMOVBreg)
+ y := v.Args[1]
+ v.reset(OpARMMOD)
v.AddArg(x)
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
+func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Signmask x)
+ // match: (Mod32u x y)
// cond:
- // result: (SRAconst x [31])
+ // result: (MODU x y)
for {
x := v.Args[0]
- v.reset(OpARMSRAconst)
+ y := v.Args[1]
+ v.reset(OpARMMODU)
v.AddArg(x)
- v.AuxInt = 31
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
+func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Sqrt x)
+ // match: (Mod8 x y)
// cond:
- // result: (SQRTD x)
+ // result: (MOD (SignExt8to32 x) (SignExt8to32 y))
for {
x := v.Args[0]
- v.reset(OpARMSQRTD)
- v.AddArg(x)
+ y := v.Args[1]
+ v.reset(OpARMMOD)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
+func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (StaticCall [argwid] {target} mem)
+ // match: (Mod8u x y)
// cond:
- // result: (CALLstatic [argwid] {target} mem)
+ // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y))
for {
- argwid := v.AuxInt
- target := v.Aux
- mem := v.Args[0]
- v.reset(OpARMCALLstatic)
- v.AuxInt = argwid
- v.Aux = target
- v.AddArg(mem)
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMODU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpStore(v *Value, config *Config) bool {
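+// Move is lowered by size and alignment: moves of up to 4 bytes expand
+// inline into byte, half-word, or word load/store pairs; word-aligned
+// moves of a multiple of 4 bytes up to 512 use DUFFCOPY, whose AuxInt
+// selects the entry point into the duffcopy routine; larger aligned
+// moves use LoweredMove with a computed end pointer, and unaligned
+// moves of more than 4 bytes fall back to LoweredMoveU.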
+func rewriteValueARM_OpMove(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Store [1] ptr val mem)
- // cond:
- // result: (MOVBstore ptr val mem)
+ // match: (Move [s] _ _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
for {
- if v.AuxInt != 1 {
+ s := v.AuxInt
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 0) {
break
}
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMMOVBstore)
- v.AddArg(ptr)
- v.AddArg(val)
+ v.reset(OpCopy)
+ v.Type = mem.Type
v.AddArg(mem)
return true
}
- // match: (Store [2] ptr val mem)
- // cond:
- // result: (MOVHstore ptr val mem)
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
for {
- if v.AuxInt != 2 {
- break
- }
- ptr := v.Args[0]
- val := v.Args[1]
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
mem := v.Args[2]
- v.reset(OpARMMOVHstore)
- v.AddArg(ptr)
- v.AddArg(val)
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (Store [4] ptr val mem)
- // cond: !is32BitFloat(val.Type)
- // result: (MOVWstore ptr val mem)
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
for {
- if v.AuxInt != 4 {
- break
- }
- ptr := v.Args[0]
- val := v.Args[1]
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
mem := v.Args[2]
- if !(!is32BitFloat(val.Type)) {
+ if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
break
}
- v.reset(OpARMMOVWstore)
- v.AddArg(ptr)
- v.AddArg(val)
+ v.reset(OpARMMOVHstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (Store [4] ptr val mem)
- // cond: is32BitFloat(val.Type)
- // result: (MOVFstore ptr val mem)
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
for {
- if v.AuxInt != 4 {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2) {
break
}
- ptr := v.Args[0]
- val := v.Args[1]
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 1
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0.AuxInt = 1
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
mem := v.Args[2]
- if !(is32BitFloat(val.Type)) {
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
break
}
- v.reset(OpARMMOVFstore)
- v.AddArg(ptr)
- v.AddArg(val)
+ v.reset(OpARMMOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (Store [8] ptr val mem)
- // cond: is64BitFloat(val.Type)
- // result: (MOVDstore ptr val mem)
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
for {
- if v.AuxInt != 8 {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
break
}
- ptr := v.Args[0]
- val := v.Args[1]
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
mem := v.Args[2]
- if !(is64BitFloat(val.Type)) {
+ if !(SizeAndAlign(s).Size() == 4) {
break
}
- v.reset(OpARMMOVDstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 3
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0.AuxInt = 3
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AuxInt = 2
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v2.AuxInt = 2
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v3.AuxInt = 1
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v4.AuxInt = 1
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v5.AddArg(dst)
+ v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v6.AddArg(src)
+ v6.AddArg(mem)
+ v5.AddArg(v6)
+ v5.AddArg(mem)
+ v3.AddArg(v5)
+ v1.AddArg(v3)
+ v.AddArg(v1)
return true
}
- return false
-}
-func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub16 x y)
- // cond:
- // result: (SUB x y)
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUB)
- v.AddArg(x)
- v.AddArg(y)
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AuxInt = 1
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v2.AuxInt = 1
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
return true
}
-}
-func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub32 x y)
- // cond:
- // result: (SUB x y)
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0
+ // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUB)
- v.AddArg(x)
- v.AddArg(y)
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARMDUFFCOPY)
+ v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4))
+ v.AddArg(dst)
+ v.AddArg(src)
+ v.AddArg(mem)
return true
}
-}
-func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub32F x y)
- // cond:
- // result: (SUBF x y)
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0
+ // result: (LoweredMove dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARMLoweredMove)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
+ v0.AddArg(src)
+ v0.AuxInt = SizeAndAlign(s).Size()
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0
+ // result: (LoweredMoveU dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0) {
+ break
+ }
+ v.reset(OpARMLoweredMoveU)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
+ v0.AddArg(src)
+ v0.AuxInt = SizeAndAlign(s).Size()
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MUL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSUBF)
+ v.reset(OpARMMUL)
v.AddArg(x)
v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
+func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Sub32carry x y)
+ // match: (Mul32 x y)
// cond:
- // result: (SUBS x y)
+ // result: (MUL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSUBS)
+ v.reset(OpARMMUL)
v.AddArg(x)
v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
+func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Sub32withcarry x y c)
+ // match: (Mul32F x y)
// cond:
- // result: (SBC x y c)
+ // result: (MULF x y)
for {
x := v.Args[0]
y := v.Args[1]
- c := v.Args[2]
- v.reset(OpARMSBC)
+ v.reset(OpARMMULF)
v.AddArg(x)
v.AddArg(y)
- v.AddArg(c)
return true
}
}
-func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Sub64F x y)
+ // match: (Mul32uhilo x y)
// cond:
- // result: (SUBD x y)
+ // result: (MULLU x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSUBD)
+ v.reset(OpARMMULLU)
v.AddArg(x)
v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
+func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Sub8 x y)
+ // match: (Mul64F x y)
// cond:
- // result: (SUB x y)
+ // result: (MULD x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSUB)
+ v.reset(OpARMMULD)
v.AddArg(x)
v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
+func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (SubPtr x y)
+ // match: (Mul8 x y)
// cond:
- // result: (SUB x y)
+ // result: (MUL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSUB)
+ v.reset(OpARMMUL)
v.AddArg(x)
v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
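+// Integer Neg lowers to RSBconst [0] (reverse subtract from zero);
+// Neg32F and Neg64F are lowered as multiplication by a constant -1 of
+// the matching float width.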
+func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Trunc16to8 x)
+ // match: (Neg16 x)
// cond:
- // result: x
+ // result: (RSBconst [0] x)
for {
x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
v.AddArg(x)
return true
}
}
-func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Trunc32to16 x)
+ // match: (Neg32 x)
// cond:
- // result: x
+ // result: (RSBconst [0] x)
for {
x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
v.AddArg(x)
return true
}
}
-func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Trunc32to8 x)
+ // match: (Neg32F x)
// cond:
- // result: x
+ // result: (MULF (MOVFconst [int64(math.Float64bits(-1))]) x)
for {
x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
+ v.reset(OpARMMULF)
+ v0 := b.NewValue0(v.Line, OpARMMOVFconst, config.fe.TypeFloat32())
+ v0.AuxInt = int64(math.Float64bits(-1))
+ v.AddArg(v0)
v.AddArg(x)
return true
}
}
-func rewriteValueARM_OpXor16(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Xor16 x y)
+ // match: (Neg64F x)
// cond:
- // result: (XOR x y)
+ // result: (MULD (MOVDconst [int64(math.Float64bits(-1))]) x)
for {
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMXOR)
+ v.reset(OpARMMULD)
+ v0 := b.NewValue0(v.Line, OpARMMOVDconst, config.fe.TypeFloat64())
+ v0.AuxInt = int64(math.Float64bits(-1))
+ v.AddArg(v0)
v.AddArg(x)
- v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpXor32(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Xor32 x y)
+ // match: (Neg8 x)
// cond:
- // result: (XOR x y)
+ // result: (RSBconst [0] x)
for {
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMXOR)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
v.AddArg(x)
- v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpXor8(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Xor8 x y)
+ // match: (Neq16 x y)
// cond:
- // result: (XOR x y)
+ // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMXOR)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpZero(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Zero [s] _ mem)
- // cond: SizeAndAlign(s).Size() == 0
- // result: mem
+ // match: (Neq32 x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
for {
- s := v.AuxInt
- mem := v.Args[1]
- if !(SizeAndAlign(s).Size() == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = mem.Type
- v.AddArg(mem)
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (Zero [s] ptr mem)
- // cond: SizeAndAlign(s).Size() == 1
- // result: (MOVBstore ptr (MOVWconst [0]) mem)
+}
+func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (NotEqual (CMPF x y))
for {
- s := v.AuxInt
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(SizeAndAlign(s).Size() == 1) {
- break
- }
- v.reset(OpARMMOVBstore)
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v0.AuxInt = 0
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
v.AddArg(v0)
- v.AddArg(mem)
return true
}
- // match: (Zero [s] ptr mem)
- // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
- // result: (MOVHstore ptr (MOVWconst [0]) mem)
+}
+func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (NotEqual (CMPD x y))
for {
- s := v.AuxInt
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
- break
- }
- v.reset(OpARMMOVHstore)
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v0.AuxInt = 0
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
v.AddArg(v0)
- v.AddArg(mem)
return true
}
- // match: (Zero [s] ptr mem)
- // cond: SizeAndAlign(s).Size() == 2
- // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))
+}
+func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
for {
- s := v.AuxInt
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(SizeAndAlign(s).Size() == 2) {
- break
- }
- v.reset(OpARMMOVBstore)
- v.AuxInt = 1
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v0.AuxInt = 0
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v1.AuxInt = 0
- v1.AddArg(ptr)
- v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v2.AuxInt = 0
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
return true
}
- // match: (Zero [s] ptr mem)
- // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
- // result: (MOVWstore ptr (MOVWconst [0]) mem)
+}
+func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
for {
- s := v.AuxInt
ptr := v.Args[0]
mem := v.Args[1]
- if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
- break
- }
- v.reset(OpARMMOVWstore)
+ v.reset(OpARMLoweredNilCheck)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v0.AuxInt = 0
- v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (Zero [s] ptr mem)
- // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
- // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
+}
+func rewriteValueARM_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORconst [1] x)
for {
- s := v.AuxInt
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+ x := v.Args[0]
+ v.reset(OpARMXORconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
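+// NotEqual folds away once the comparison result is known: a FlagEQ
+// input yields constant 0, any of the FlagLT_*/FlagGT_* inputs yields
+// constant 1, and InvertFlags is dropped since swapping the comparison
+// operands does not change inequality.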
+func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NotEqual (FlagEQ))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
break
}
- v.reset(OpARMMOVHstore)
- v.AuxInt = 2
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v0.AuxInt = 0
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
- v1.AuxInt = 0
- v1.AddArg(ptr)
- v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v2.AuxInt = 0
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
return true
}
- // match: (Zero [s] ptr mem)
- // cond: SizeAndAlign(s).Size() == 4
- // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+ // match: (NotEqual (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
for {
- s := v.AuxInt
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(SizeAndAlign(s).Size() == 4) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
break
}
- v.reset(OpARMMOVBstore)
- v.AuxInt = 3
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v0.AuxInt = 0
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v1.AuxInt = 2
- v1.AddArg(ptr)
- v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v2.AuxInt = 0
- v1.AddArg(v2)
- v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v3.AuxInt = 1
- v3.AddArg(ptr)
- v4 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v4.AuxInt = 0
- v3.AddArg(v4)
- v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v5.AuxInt = 0
- v5.AddArg(ptr)
- v6 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v6.AuxInt = 0
- v5.AddArg(v6)
- v5.AddArg(mem)
- v3.AddArg(v5)
- v1.AddArg(v3)
- v.AddArg(v1)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
- // match: (Zero [s] ptr mem)
- // cond: SizeAndAlign(s).Size() == 3
- // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+ // match: (NotEqual (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
for {
- s := v.AuxInt
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(SizeAndAlign(s).Size() == 3) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
break
}
- v.reset(OpARMMOVBstore)
- v.AuxInt = 2
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v0.AuxInt = 0
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v1.AuxInt = 1
- v1.AddArg(ptr)
- v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v2.AuxInt = 0
- v1.AddArg(v2)
- v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v3.AuxInt = 0
- v3.AddArg(ptr)
- v4 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v4.AuxInt = 0
- v3.AddArg(v4)
- v3.AddArg(mem)
- v1.AddArg(v3)
- v.AddArg(v1)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
- // match: (Zero [s] ptr mem)
- // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0
- // result: (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/4))] ptr (MOVWconst [0]) mem)
+ // match: (NotEqual (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
for {
- s := v.AuxInt
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
break
}
- v.reset(OpARMDUFFZERO)
- v.AuxInt = 4 * (128 - int64(SizeAndAlign(s).Size()/4))
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
- // match: (Zero [s] ptr mem)
- // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0
- // result: (LoweredZero ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem)
+ // match: (NotEqual (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
for {
- s := v.AuxInt
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
break
}
- v.reset(OpARMLoweredZero)
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type)
- v0.AddArg(ptr)
- v0.AuxInt = SizeAndAlign(s).Size()
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v1.AuxInt = 0
- v.AddArg(v1)
- v.AddArg(mem)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
return true
}
- // match: (Zero [s] ptr mem)
- // cond: SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0
- // result: (LoweredZeroU ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem)
+ // match: (NotEqual (InvertFlags x))
+ // cond:
+ // result: (NotEqual x)
for {
- s := v.AuxInt
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0) {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
break
}
- v.reset(OpARMLoweredZeroU)
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type)
- v0.AddArg(ptr)
- v0.AuxInt = SizeAndAlign(s).Size()
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
- v1.AuxInt = 0
- v.AddArg(v1)
- v.AddArg(mem)
+ x := v_0.Args[0]
+ v.reset(OpARMNotEqual)
+ v.AddArg(x)
return true
}
return false
}
-func rewriteValueARM_OpZeroExt16to32(v *Value, config *Config) bool {
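+// OR with a constant operand (in either position) becomes ORconst, and
+// OR x x reduces to x. The ORconst rules then fold the identity 0, an
+// all-ones mask, constant operands, and nested ORconst chains.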
+func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (ZeroExt16to32 x)
+ // match: (OR (MOVWconst [c]) x)
// cond:
- // result: (MOVHUreg x)
+ // result: (ORconst [c] x)
for {
- x := v.Args[0]
- v.reset(OpARMMOVHUreg)
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (OR x (MOVWconst [c]))
+ // cond:
+ // result: (ORconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (OR x x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
return true
}
+ return false
}
-func rewriteValueARM_OpZeroExt8to16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (ZeroExt8to16 x)
+ // match: (ORconst [0] x)
// cond:
- // result: (MOVBUreg x)
+ // result: x
for {
+ if v.AuxInt != 0 {
+ break
+ }
x := v.Args[0]
- v.reset(OpARMMOVBUreg)
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVWconst [-1])
+ for {
+ c := v.AuxInt
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = -1
+ return true
+ }
+ // match: (ORconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c|d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c | d
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // cond:
+ // result: (ORconst [c|d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMORconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMORconst)
+ v.AuxInt = c | d
v.AddArg(x)
return true
}
+ return false
}
-func rewriteValueARM_OpZeroExt8to32(v *Value, config *Config) bool {
+func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (ZeroExt8to32 x)
+ // match: (OffPtr [off] ptr:(SP))
// cond:
- // result: (MOVBUreg x)
+ // result: (MOVWaddr [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADDconst [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (OR x y)
for {
x := v.Args[0]
- v.reset(OpARMMOVBUreg)
+ y := v.Args[1]
+ v.reset(OpARMOR)
v.AddArg(x)
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpZeromask(v *Value, config *Config) bool {
+func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Zeromask x)
+ // match: (Or32 x y)
// cond:
- // result: (LoweredZeromask x)
+ // result: (OR x y)
for {
x := v.Args[0]
- v.reset(OpARMLoweredZeromask)
+ y := v.Args[1]
+ v.reset(OpARMOR)
v.AddArg(x)
+ v.AddArg(y)
return true
}
}
-func rewriteBlockARM(b *Block) bool {
- switch b.Kind {
- case BlockIf:
- // match: (If (Equal cc) yes no)
+func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSB (MOVWconst [c]) x)
+ // cond:
+ // result: (SUBconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSB x (MOVWconst [c]))
+ // cond:
+ // result: (RSBconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
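+// RSBconst [c] x computes c-x, so a constant argument folds directly,
+// a nested RSBconst cancels into an ADDconst, and an ADDconst or
+// SUBconst in the argument is absorbed by adjusting c; results are
+// truncated to 32 bits via int64(int32(...)).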
+func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(c-d))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(c - d))
+ return true
+ }
+ // match: (RSBconst [c] (RSBconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(c-d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBconst [c] (ADDconst [d] x))
+ // cond:
+ // result: (RSBconst [int64(int32(c-d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBconst [c] (SUBconst [d] x))
+ // cond:
+ // result: (RSBconst [int64(int32(c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCconst [c] (ADDconst [d] x) flags)
+ // cond:
+ // result: (RSCconst [int64(int32(c-d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCconst [c] (SUBconst [d] x) flags)
+ // cond:
+ // result: (RSCconst [int64(int32(c+d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
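+// Sub-word right shifts first widen the shifted value: variable counts
+// zero- or sign-extend the 16- or 8-bit operand to 32 bits, while
+// constant (Const64) counts shift the value left by 16 or 24 and then
+// right by c+16 or c+24. Counts at or above the operand width fold to a
+// zero constant for unsigned shifts or to an arithmetic shift by 31 for
+// signed ones. (32-bit shifts need no widening and pass x through.)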
+func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 x y)
+ // cond:
+ // result: (SRL (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 x y)
+ // cond:
+ // result: (SRL (ZeroExt16to32 x) y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v0.AuxInt = 16
+ v.AddArg(v0)
+ v.AuxInt = c + 16
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 x y)
+ // cond:
+ // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 x y)
+ // cond:
+ // result: (SRA (SignExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 x y)
+ // cond:
+ // result: (SRA (SignExt16to32 x) y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v0.AuxInt = 16
+ v.AddArg(v0)
+ v.AuxInt = c + 16
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v0.AuxInt = 16
+ v.AddArg(v0)
+ v.AuxInt = 31
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x8 x y)
+ // cond:
+ // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux16 x y)
+ // cond:
+ // result: (SRL x (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux32 x y)
+ // cond:
+ // result: (SRL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux8 x y)
+ // cond:
+ // result: (SRL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x16 x y)
+ // cond:
+ // result: (SRA x (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x32 x y)
+ // cond:
+ // result: (SRA x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRAconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AddArg(x)
+ v.AuxInt = 31
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 x y)
+ // cond:
+ // result: (SRA x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 x y)
+ // cond:
+ // result: (SRL (ZeroExt8to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 x y)
+ // cond:
+ // result: (SRL (ZeroExt8to32 x) y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v0.AuxInt = 24
+ v.AddArg(v0)
+ v.AuxInt = c + 24
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 x y)
+ // cond:
+ // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 x y)
+ // cond:
+ // result: (SRA (SignExt8to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 x y)
+ // cond:
+ // result: (SRA (SignExt8to32 x) y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v0.AuxInt = 24
+ v.AddArg(v0)
+ v.AuxInt = c + 24
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v0.AuxInt = 24
+ v.AddArg(v0)
+ v.AuxInt = 31
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 x y)
+ // cond:
+ // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBC (MOVWconst [c]) x flags)
+ // cond:
+ // result: (RSCconst [c] x flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (MOVWconst [c]) flags)
+ // cond:
+ // result: (SBCconst [c] x flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCconst [c] (ADDconst [d] x) flags)
+ // cond:
+ // result: (SBCconst [int64(int32(c-d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCconst [c] (SUBconst [d] x) flags)
+ // cond:
+ // result: (SBCconst [int64(int32(c+d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SLL x (MOVWconst [c]))
+ // cond:
+ // result: (SLLconst x [c&31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSLLconst)
+ v.AddArg(x)
+ v.AuxInt = c & 31
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSLLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SLLconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(uint32(d)<<uint64(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(uint32(d) << uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRA x (MOVWconst [c]))
+ // cond:
+ // result: (SRAconst x [c&31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSRAconst)
+ v.AddArg(x)
+ v.AuxInt = c & 31
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRAconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRAconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(d)>>uint64(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(d) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRL x (MOVWconst [c]))
+ // cond:
+ // result: (SRLconst x [c&31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSRLconst)
+ v.AddArg(x)
+ v.AuxInt = c & 31
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRLconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(uint32(d)>>uint64(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(uint32(d) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUB (MOVWconst [c]) x)
+ // cond:
+ // result: (RSBconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (MOVWconst [c]))
+ // cond:
+ // result: (SUBconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x x)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBS (MOVWconst [c]) x)
+ // cond:
+ // result: (RSBSconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBS x (MOVWconst [c]))
+ // cond:
+ // result: (SUBSconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(d-c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(d - c))
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(-c-d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(-c - d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(-c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(-c + d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (RSBconst [d] x))
+ // cond:
+ // result: (RSBconst [int64(int32(-c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(-c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpSelect0(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Select0 <t> x)
+ // cond: t.IsFlags()
+ // result: (Carry x)
+ for {
+ t := v.Type
+ x := v.Args[0]
+ if !(t.IsFlags()) {
+ break
+ }
+ v.reset(OpARMCarry)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select0 <t> x)
+ // cond: !t.IsFlags()
+ // result: (LoweredSelect0 x)
+ for {
+ t := v.Type
+ x := v.Args[0]
+ if !(!t.IsFlags()) {
+ break
+ }
+ v.reset(OpARMLoweredSelect0)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpSelect1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Select1 x)
+ // cond:
+ // result: (LoweredSelect1 x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMLoweredSelect1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt16to32 x)
+ // cond:
+ // result: (MOVHreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to16 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to32 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Signmask x)
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v.reset(OpARMSRAconst)
+ v.AddArg(x)
+ v.AuxInt = 31
+ return true
+ }
+}
+func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sqrt x)
+ // cond:
+ // result: (SQRTD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMSQRTD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (StaticCall [argwid] {target} mem)
+ // cond:
+ // result: (CALLstatic [argwid] {target} mem)
+ for {
+ argwid := v.AuxInt
+ target := v.Aux
+ mem := v.Args[0]
+ v.reset(OpARMCALLstatic)
+ v.AuxInt = argwid
+ v.Aux = target
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpStore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Store [1] ptr val mem)
+ // cond:
+ // result: (MOVBstore ptr val mem)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [2] ptr val mem)
+ // cond:
+ // result: (MOVHstore ptr val mem)
+ for {
+ if v.AuxInt != 2 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVHstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(!is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: is32BitFloat(val.Type)
+ // result: (MOVFstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVFstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [8] ptr val mem)
+ // cond: is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub16 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32F x y)
+ // cond:
+ // result: (SUBF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUBF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32carry x y)
+ // cond:
+ // result: (SUBS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUBS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32withcarry x y c)
+ // cond:
+ // result: (SBC x y c)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ c := v.Args[2]
+ v.reset(OpARMSBC)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(c)
+ return true
+ }
+}
+func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub64F x y)
+ // cond:
+ // result: (SUBD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUBD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub8 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SubPtr x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc16to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to16 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XOR (MOVWconst [c]) x)
+ // cond:
+ // result: (XORconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XOR x (MOVWconst [c]))
+ // cond:
+ // result: (XORconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XOR x x)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // cond:
+ // result: (XORconst [c^d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMXORconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpXor16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor16 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpXor32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor32 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpXor8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor8 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpZero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Zero [s] _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore ptr (MOVWconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore ptr (MOVWconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 1
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AuxInt = 0
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore ptr (MOVWconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = 2
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
+ v1.AuxInt = 0
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 3
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AuxInt = 2
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v3.AuxInt = 1
+ v3.AddArg(ptr)
+ v4 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v4.AuxInt = 0
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v5.AuxInt = 0
+ v5.AddArg(ptr)
+ v6 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v6.AuxInt = 0
+ v5.AddArg(v6)
+ v5.AddArg(mem)
+ v3.AddArg(v5)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 2
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AuxInt = 1
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v3.AuxInt = 0
+ v3.AddArg(ptr)
+ v4 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v4.AuxInt = 0
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0
+ // result: (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/4))] ptr (MOVWconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARMDUFFZERO)
+ v.AuxInt = 4 * (128 - int64(SizeAndAlign(s).Size()/4))
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0
+ // result: (LoweredZero ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARMLoweredZero)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type)
+ v0.AddArg(ptr)
+ v0.AuxInt = SizeAndAlign(s).Size()
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v1.AuxInt = 0
+ v.AddArg(v1)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0
+ // result: (LoweredZeroU ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0) {
+ break
+ }
+ v.reset(OpARMLoweredZeroU)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type)
+ v0.AddArg(ptr)
+ v0.AuxInt = SizeAndAlign(s).Size()
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v1.AuxInt = 0
+ v.AddArg(v1)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpZeroExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt16to32 x)
+ // cond:
+ // result: (MOVHUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpZeroExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to16 x)
+ // cond:
+ // result: (MOVBUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpZeroExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to32 x)
+ // cond:
+ // result: (MOVBUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpZeromask(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Zeromask x)
+ // cond:
+ // result: (LoweredZeromask x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMLoweredZeromask)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteBlockARM(b *Block) bool {
+ switch b.Kind {
+ case BlockARMEQ:
+ // match: (EQ (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (EQ (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // cond:
+ // result: (EQ cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMEQ
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARMGE:
+ // match: (GE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (LE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARMGT:
+ // match: (GT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (LT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // cond:
+ // result: (EQ cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMEQ
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // cond:
+ // result: (NE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMNotEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMNE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // cond:
+ // result: (LT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMLessThan {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessThanU cc) yes no)
+ // cond:
+ // result: (ULT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMLessThanU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMULT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // cond:
+ // result: (LE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMLessEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessEqualU cc) yes no)
+ // cond:
+ // result: (ULE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMLessEqualU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMULE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // cond:
+ // result: (GT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMGreaterThan {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterThanU cc) yes no)
+ // cond:
+ // result: (UGT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMGreaterThanU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMUGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // cond:
+ // result: (GE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMGreaterEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterEqualU cc) yes no)
+ // cond:
+ // result: (UGE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMGreaterEqualU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMUGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If cond yes no)
+ // cond:
+ // result: (NE (CMPconst [0] cond) yes no)
+ for {
+ v := b.Control
+ cond := b.Control
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMNE
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(cond)
+ b.SetControl(v0)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARMLE:
+ // match: (LE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (GE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARMLT:
+ // match: (LT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (GT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARMNE:
+ // match: (NE (CMPconst [0] (Equal cc)) yes no)
+ // cond:
+ // result: (EQ cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMEQ
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (NotEqual cc)) yes no)
+ // cond:
+ // result: (NE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMNotEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMNE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThan cc)) yes no)
+ // cond:
+ // result: (LT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMLessThan {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThanU cc)) yes no)
+ // cond:
+ // result: (ULT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMLessThanU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMULT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqual cc)) yes no)
+ // cond:
+ // result: (LE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMLessEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
+ // cond:
+ // result: (ULE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMLessEqualU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMULE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
+ // cond:
+ // result: (GT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMGreaterThan {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
+ // cond:
+ // result: (UGT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMGreaterThanU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMUGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
+ // cond:
+ // result: (GE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMGreaterEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
+ // cond:
+ // result: (UGE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMGreaterEqualU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMUGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (NE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagGT_ULT) yes no)
// cond:
- // result: (EQ cc yes no)
+ // result: (First nil yes no)
for {
v := b.Control
- if v.Op != OpARMEqual {
+ if v.Op != OpARMFlagGT_ULT {
break
}
- cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMEQ
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
_ = yes
_ = no
return true
}
- // match: (If (NotEqual cc) yes no)
+ // match: (NE (FlagGT_UGT) yes no)
// cond:
- // result: (NE cc yes no)
+ // result: (First nil yes no)
for {
v := b.Control
- if v.Op != OpARMNotEqual {
+ if v.Op != OpARMFlagGT_UGT {
break
}
- cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMNE
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
_ = yes
_ = no
return true
}
- // match: (If (LessThan cc) yes no)
+ // match: (NE (InvertFlags cmp) yes no)
// cond:
- // result: (LT cc yes no)
+ // result: (NE cmp yes no)
for {
v := b.Control
- if v.Op != OpARMLessThan {
+ if v.Op != OpARMInvertFlags {
break
}
- cc := v.Args[0]
+ cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMLT
- b.SetControl(cc)
+ b.Kind = BlockARMNE
+ b.SetControl(cmp)
_ = yes
_ = no
return true
}
- // match: (If (LessThanU cc) yes no)
+ case BlockARMUGE:
+ // match: (UGE (FlagEQ) yes no)
// cond:
- // result: (ULT cc yes no)
+ // result: (First nil yes no)
for {
v := b.Control
- if v.Op != OpARMLessThanU {
+ if v.Op != OpARMFlagEQ {
break
}
- cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMULT
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
_ = yes
_ = no
return true
}
- // match: (If (LessEqual cc) yes no)
+ // match: (UGE (FlagLT_ULT) yes no)
// cond:
- // result: (LE cc yes no)
+ // result: (First nil no yes)
for {
v := b.Control
- if v.Op != OpARMLessEqual {
+ if v.Op != OpARMFlagLT_ULT {
break
}
- cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMLE
- b.SetControl(cc)
- _ = yes
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
_ = no
+ _ = yes
return true
}
- // match: (If (LessEqualU cc) yes no)
+ // match: (UGE (FlagLT_UGT) yes no)
// cond:
- // result: (ULE cc yes no)
+ // result: (First nil yes no)
for {
v := b.Control
- if v.Op != OpARMLessEqualU {
+ if v.Op != OpARMFlagLT_UGT {
break
}
- cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMULE
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
_ = yes
_ = no
return true
}
- // match: (If (GreaterThan cc) yes no)
+ // match: (UGE (FlagGT_ULT) yes no)
// cond:
- // result: (GT cc yes no)
+ // result: (First nil no yes)
for {
v := b.Control
- if v.Op != OpARMGreaterThan {
+ if v.Op != OpARMFlagGT_ULT {
break
}
- cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMGT
- b.SetControl(cc)
- _ = yes
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
_ = no
+ _ = yes
return true
}
- // match: (If (GreaterThanU cc) yes no)
+ // match: (UGE (FlagGT_UGT) yes no)
// cond:
- // result: (UGT cc yes no)
+ // result: (First nil yes no)
for {
v := b.Control
- if v.Op != OpARMGreaterThanU {
+ if v.Op != OpARMFlagGT_UGT {
break
}
- cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMUGT
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
_ = yes
_ = no
return true
}
- // match: (If (GreaterEqual cc) yes no)
+ // match: (UGE (InvertFlags cmp) yes no)
// cond:
- // result: (GE cc yes no)
+ // result: (ULE cmp yes no)
for {
v := b.Control
- if v.Op != OpARMGreaterEqual {
+ if v.Op != OpARMInvertFlags {
break
}
- cc := v.Args[0]
+ cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMGE
- b.SetControl(cc)
+ b.Kind = BlockARMULE
+ b.SetControl(cmp)
_ = yes
_ = no
return true
}
- // match: (If (GreaterEqualU cc) yes no)
+ case BlockARMUGT:
+ // match: (UGT (FlagEQ) yes no)
// cond:
- // result: (UGE cc yes no)
+ // result: (First nil no yes)
for {
v := b.Control
- if v.Op != OpARMGreaterEqualU {
+ if v.Op != OpARMFlagEQ {
break
}
- cc := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMUGE
- b.SetControl(cc)
- _ = yes
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
_ = no
+ _ = yes
return true
}
- // match: (If cond yes no)
+ // match: (UGT (FlagLT_ULT) yes no)
// cond:
- // result: (NE (CMPconst [0] cond) yes no)
+ // result: (First nil no yes)
for {
v := b.Control
- cond := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMNE
- v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v0.AuxInt = 0
- v0.AddArg(cond)
- b.SetControl(v0)
- _ = yes
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
_ = no
+ _ = yes
return true
}
- case BlockARMNE:
- // match: (NE (CMPconst [0] (Equal cc)) yes no)
+ // match: (UGT (FlagLT_UGT) yes no)
// cond:
- // result: (EQ cc yes no)
+ // result: (First nil yes no)
for {
v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
- break
- }
- v_0 := v.Args[0]
- if v_0.Op != OpARMEqual {
+ if v.Op != OpARMFlagLT_UGT {
break
}
- cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMEQ
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
_ = yes
_ = no
return true
}
- // match: (NE (CMPconst [0] (NotEqual cc)) yes no)
+ // match: (UGT (FlagGT_ULT) yes no)
// cond:
- // result: (NE cc yes no)
+ // result: (First nil no yes)
for {
v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
- break
- }
- v_0 := v.Args[0]
- if v_0.Op != OpARMNotEqual {
+ if v.Op != OpARMFlagGT_ULT {
break
}
- cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMNE
- b.SetControl(cc)
- _ = yes
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
_ = no
+ _ = yes
return true
}
- // match: (NE (CMPconst [0] (LessThan cc)) yes no)
+ // match: (UGT (FlagGT_UGT) yes no)
// cond:
- // result: (LT cc yes no)
+ // result: (First nil yes no)
for {
v := b.Control
- if v.Op != OpARMCMPconst {
+ if v.Op != OpARMFlagGT_UGT {
break
}
- if v.AuxInt != 0 {
- break
- }
- v_0 := v.Args[0]
- if v_0.Op != OpARMLessThan {
- break
- }
- cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMLT
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
_ = yes
_ = no
return true
}
- // match: (NE (CMPconst [0] (LessThanU cc)) yes no)
+ // match: (UGT (InvertFlags cmp) yes no)
// cond:
- // result: (ULT cc yes no)
+ // result: (ULT cmp yes no)
for {
v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
+ if v.Op != OpARMInvertFlags {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpARMLessThanU {
- break
- }
- cc := v_0.Args[0]
+ cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
b.Kind = BlockARMULT
- b.SetControl(cc)
+ b.SetControl(cmp)
_ = yes
_ = no
return true
}
- // match: (NE (CMPconst [0] (LessEqual cc)) yes no)
+ case BlockARMULE:
+ // match: (ULE (FlagEQ) yes no)
// cond:
- // result: (LE cc yes no)
+ // result: (First nil yes no)
for {
v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
- break
- }
- v_0 := v.Args[0]
- if v_0.Op != OpARMLessEqual {
+ if v.Op != OpARMFlagEQ {
break
}
- cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMLE
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
_ = yes
_ = no
return true
}
- // match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
+ // match: (ULE (FlagLT_ULT) yes no)
// cond:
- // result: (ULE cc yes no)
+ // result: (First nil yes no)
for {
v := b.Control
- if v.Op != OpARMCMPconst {
+ if v.Op != OpARMFlagLT_ULT {
break
}
- if v.AuxInt != 0 {
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpARMLessEqualU {
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
break
}
- cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMULE
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
_ = yes
_ = no
return true
}
- // match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
+ // match: (ULE (FlagGT_UGT) yes no)
// cond:
- // result: (GT cc yes no)
+ // result: (First nil no yes)
for {
v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
+ if v.Op != OpARMFlagGT_UGT {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpARMGreaterThan {
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (UGE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
break
}
- cc := v_0.Args[0]
+ cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMGT
- b.SetControl(cc)
+ b.Kind = BlockARMUGE
+ b.SetControl(cmp)
_ = yes
_ = no
return true
}
- // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
+ case BlockARMULT:
+ // match: (ULT (FlagEQ) yes no)
// cond:
- // result: (UGT cc yes no)
+ // result: (First nil no yes)
for {
v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
+ if v.Op != OpARMFlagEQ {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpARMGreaterThanU {
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
break
}
- cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMUGT
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
_ = yes
_ = no
return true
}
- // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
+ // match: (ULT (FlagLT_UGT) yes no)
// cond:
- // result: (GE cc yes no)
+ // result: (First nil no yes)
for {
v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
+ if v.Op != OpARMFlagLT_UGT {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpARMGreaterEqual {
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
break
}
- cc := v_0.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMGE
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
_ = yes
_ = no
return true
}
- // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
+ // match: (ULT (FlagGT_UGT) yes no)
// cond:
- // result: (UGE cc yes no)
+ // result: (First nil no yes)
for {
v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
+ if v.Op != OpARMFlagGT_UGT {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpARMGreaterEqualU {
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (UGT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
break
}
- cc := v_0.Args[0]
+ cmp := v.Args[0]
yes := b.Succs[0]
no := b.Succs[1]
- b.Kind = BlockARMUGE
- b.SetControl(cc)
+ b.Kind = BlockARMUGT
+ b.SetControl(cmp)
_ = yes
_ = no
return true