return rewriteValueARM_OpGreater8(v, config)
case OpGreater8U:
return rewriteValueARM_OpGreater8U(v, config)
+ case OpHmul16:
+ return rewriteValueARM_OpHmul16(v, config)
+ case OpHmul16u:
+ return rewriteValueARM_OpHmul16u(v, config)
+ case OpHmul32:
+ return rewriteValueARM_OpHmul32(v, config)
+ case OpHmul32u:
+ return rewriteValueARM_OpHmul32u(v, config)
+ case OpHmul8:
+ return rewriteValueARM_OpHmul8(v, config)
+ case OpHmul8u:
+ return rewriteValueARM_OpHmul8u(v, config)
case OpInterCall:
return rewriteValueARM_OpInterCall(v, config)
case OpIsInBounds:
return rewriteValueARM_OpIsInBounds(v, config)
case OpLoad:
return rewriteValueARM_OpLoad(v, config)
+ case OpLsh16x16:
+ return rewriteValueARM_OpLsh16x16(v, config)
+ case OpLsh16x32:
+ return rewriteValueARM_OpLsh16x32(v, config)
+ case OpLsh16x64:
+ return rewriteValueARM_OpLsh16x64(v, config)
+ case OpLsh16x8:
+ return rewriteValueARM_OpLsh16x8(v, config)
+ case OpLsh32x16:
+ return rewriteValueARM_OpLsh32x16(v, config)
+ case OpLsh32x32:
+ return rewriteValueARM_OpLsh32x32(v, config)
+ case OpLsh32x64:
+ return rewriteValueARM_OpLsh32x64(v, config)
+ case OpLsh32x8:
+ return rewriteValueARM_OpLsh32x8(v, config)
+ case OpLsh8x16:
+ return rewriteValueARM_OpLsh8x16(v, config)
+ case OpLsh8x32:
+ return rewriteValueARM_OpLsh8x32(v, config)
+ case OpLsh8x64:
+ return rewriteValueARM_OpLsh8x64(v, config)
+ case OpLsh8x8:
+ return rewriteValueARM_OpLsh8x8(v, config)
case OpARMMOVBUload:
return rewriteValueARM_OpARMMOVBUload(v, config)
case OpARMMOVBload:
return rewriteValueARM_OpARMMOVBload(v, config)
case OpARMMOVWstore:
return rewriteValueARM_OpARMMOVWstore(v, config)
+ case OpMul16:
+ return rewriteValueARM_OpMul16(v, config)
+ case OpMul32:
+ return rewriteValueARM_OpMul32(v, config)
+ case OpMul8:
+ return rewriteValueARM_OpMul8(v, config)
case OpNeg16:
return rewriteValueARM_OpNeg16(v, config)
case OpNeg32:
return rewriteValueARM_OpNeg32(v, config)
case OpOrB:
return rewriteValueARM_OpOrB(v, config)
+ case OpRsh16Ux16:
+ return rewriteValueARM_OpRsh16Ux16(v, config)
+ case OpRsh16Ux32:
+ return rewriteValueARM_OpRsh16Ux32(v, config)
+ case OpRsh16Ux64:
+ return rewriteValueARM_OpRsh16Ux64(v, config)
+ case OpRsh16Ux8:
+ return rewriteValueARM_OpRsh16Ux8(v, config)
+ case OpRsh16x16:
+ return rewriteValueARM_OpRsh16x16(v, config)
+ case OpRsh16x32:
+ return rewriteValueARM_OpRsh16x32(v, config)
+ case OpRsh16x64:
+ return rewriteValueARM_OpRsh16x64(v, config)
+ case OpRsh16x8:
+ return rewriteValueARM_OpRsh16x8(v, config)
+ case OpRsh32Ux16:
+ return rewriteValueARM_OpRsh32Ux16(v, config)
+ case OpRsh32Ux32:
+ return rewriteValueARM_OpRsh32Ux32(v, config)
+ case OpRsh32Ux64:
+ return rewriteValueARM_OpRsh32Ux64(v, config)
+ case OpRsh32Ux8:
+ return rewriteValueARM_OpRsh32Ux8(v, config)
+ case OpRsh32x16:
+ return rewriteValueARM_OpRsh32x16(v, config)
+ case OpRsh32x32:
+ return rewriteValueARM_OpRsh32x32(v, config)
+ case OpRsh32x64:
+ return rewriteValueARM_OpRsh32x64(v, config)
+ case OpRsh32x8:
+ return rewriteValueARM_OpRsh32x8(v, config)
+ case OpRsh8Ux16:
+ return rewriteValueARM_OpRsh8Ux16(v, config)
+ case OpRsh8Ux32:
+ return rewriteValueARM_OpRsh8Ux32(v, config)
+ case OpRsh8Ux64:
+ return rewriteValueARM_OpRsh8Ux64(v, config)
+ case OpRsh8Ux8:
+ return rewriteValueARM_OpRsh8Ux8(v, config)
+ case OpRsh8x16:
+ return rewriteValueARM_OpRsh8x16(v, config)
+ case OpRsh8x32:
+ return rewriteValueARM_OpRsh8x32(v, config)
+ case OpRsh8x64:
+ return rewriteValueARM_OpRsh8x64(v, config)
+ case OpRsh8x8:
+ return rewriteValueARM_OpRsh8x8(v, config)
case OpSignExt16to32:
return rewriteValueARM_OpSignExt16to32(v, config)
case OpSignExt8to16:
return rewriteValueARM_OpSignExt8to16(v, config)
}
}
+func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
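+ // The 16-bit high multiply is computed by widening both operands to 32 bits,
+ // multiplying, and shifting the 32-bit product right by 16 to recover the high
+ // half; the unsigned and 8-bit variants below follow the same widen-multiply-shift pattern.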
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v.AuxInt = 16
+ return true
+ }
+}
+func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRLconst)
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v.AuxInt = 16
+ return true
+ }
+}
+func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (HMUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMHMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (HMULU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMHMULU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v.AuxInt = 8
+ return true
+ }
+}
+func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRLconst)
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v.AuxInt = 8
+ return true
+ }
+}
func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
b := v.Block
_ = b
}
return false
}
-func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVBUload [off1] {sym1} (ADDconst [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // match: (Lsh16x16 x y)
+ // cond:
+ // result: (SLL x y)
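+ // Variable-count left shifts of every narrow width lower directly to the
+ // 32-bit SLL; only constant 64-bit counts (the x64 rules below) are folded
+ // into immediate shifts or constants.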
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARMMOVBUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- return false
}
-func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVBload [off1] {sym1} (ADDconst [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // match: (Lsh16x32 x y)
+ // cond:
+ // result: (SLL x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARMMOVBload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- return false
}
-func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVBstore [off1] {sym1} (ADDconst [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SLLconst x [c])
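+ // Shifts by a 64-bit constant are resolved at compile time: an in-range count
+ // becomes an immediate SLLconst, and a count of 16 or more (next rule) yields
+ // a constant zero; Lsh32x64 and Lsh8x64 below are analogous.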
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
break
}
- v.reset(OpARMMOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ v.reset(OpARMSLLconst)
+ v.AddArg(x)
+ v.AuxInt = c
return true
}
- return false
-}
-func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVHUload [off1] {sym1} (ADDconst [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
break
}
- v.reset(OpARMMOVHUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.reset(OpConst16)
+ v.AuxInt = 0
return true
}
return false
}
-func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVHload [off1] {sym1} (ADDconst [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // match: (Lsh16x8 x y)
+ // cond:
+ // result: (SLL x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARMMOVHload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- return false
}
-func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVHstore [off1] {sym1} (ADDconst [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // match: (Lsh32x16 x y)
+ // cond:
+ // result: (SLL x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARMMOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- return false
}
-func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVWload [off1] {sym1} (ADDconst [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // match: (Lsh32x32 x y)
+ // cond:
+ // result: (SLL x y)
for {
- off1 := v.AuxInt
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 x y)
+ // cond:
+ // result: (SLL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 x y)
+ // cond:
+ // result: (SLL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 x y)
+ // cond:
+ // result: (SLL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 x y)
+ // cond:
+ // result: (SLL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBUload [off1] {sym1} (ADDconst [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpARMADDconst {
if !(canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpARMMOVWload)
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBload [off1] {sym1} (ADDconst [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstore [off1] {sym1} (ADDconst [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHUload [off1] {sym1} (ADDconst [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHload [off1] {sym1} (ADDconst [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
}
return false
}
-func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstore [off1] {sym1} (ADDconst [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWload [off1] {sym1} (ADDconst [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstore [off1] {sym1} (ADDconst [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MUL x y)
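+ // Only the low bits of a narrow product are significant, so 8- and 16-bit
+ // multiplies use the 32-bit MUL directly.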
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (RSBconst [0] x)
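+ // Negation is a reverse-subtract from zero: RSBconst [0] x computes 0 - x.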
+ for {
+ x := v.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (RSBconst [0] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (RSBconst [0] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
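+ // CMP compares full 32-bit registers, so sub-word operands are zero-extended
+ // first; NotEqual then materializes the boolean from the flags. Neq8 below
+ // does the same with 8-bit operands.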
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (XOR x y)
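+ // Booleans are 0 or 1, so inequality is simply XOR.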
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMLoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORconst [1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMXORconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADD (MOVWconst <config.Frontend().TypeInt32()> [off]) ptr)
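+ // The constant offset is materialized in a register with MOVWconst and added
+ // to the base pointer.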
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.Frontend().TypeInt32())
+ v0.AuxInt = off
+ v.AddArg(v0)
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 x y)
+ // cond:
+ // result: (SRL (ZeroExt16to32 x) y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 x y)
+ // cond:
+ // result: (SRL (ZeroExt16to32 x) y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 <t> x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRLconst (SLLconst <t> x [16]) [c+16])
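+ // The 16-bit value sits in the low half of a 32-bit register: shifting left by
+ // 16 discards any stale upper bits, and the logical right shift by c+16 both
+ // re-aligns the value and performs the requested shift. The 8-bit rules use 24
+ // the same way.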
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AddArg(x)
+ v0.AuxInt = 16
+ v.AddArg(v0)
+ v.AuxInt = c + 16
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 x y)
+ // cond:
+ // result: (SRL (ZeroExt16to32 x) y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 x y)
+ // cond:
+ // result: (SRA (SignExt16to32 x) y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 x y)
+ // cond:
+ // result: (SRA (SignExt16to32 x) y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 <t> x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRAconst (SLLconst <t> x [16]) [c+16])
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AddArg(x)
+ v0.AuxInt = 16
+ v.AddArg(v0)
+ v.AuxInt = c + 16
+ return true
+ }
+ // match: (Rsh16x64 <t> x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SRAconst (SLLconst <t> x [16]) [31])
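+ // For counts of 16 or more every result bit is a copy of the sign bit, so the
+ // sign bit is moved to bit 31 and then arithmetically shifted right by 31.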
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AddArg(x)
+ v0.AuxInt = 16
+ v.AddArg(v0)
+ v.AuxInt = 31
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVWstore [off1] {sym1} (ADDconst [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // match: (Rsh16x8 x y)
+ // cond:
+ // result: (SRA (SignExt16to32 x) y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARMMOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
return true
}
- return false
}
-func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Neg16 x)
+ // match: (Rsh32Ux16 x y)
// cond:
- // result: (RSBconst [0] x)
+ // result: (SRL x y)
for {
x := v.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = 0
+ y := v.Args[1]
+ v.reset(OpARMSRL)
v.AddArg(x)
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Neg32 x)
+ // match: (Rsh32Ux32 x y)
// cond:
- // result: (RSBconst [0] x)
+ // result: (SRL x y)
for {
x := v.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = 0
+ y := v.Args[1]
+ v.reset(OpARMSRL)
v.AddArg(x)
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Neg8 x)
- // cond:
- // result: (RSBconst [0] x)
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRLconst x [c])
for {
x := v.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = 0
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRLconst)
v.AddArg(x)
+ v.AuxInt = c
return true
}
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
}
-func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Neq16 x y)
+ // match: (Rsh32Ux8 x y)
// cond:
- // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ // result: (SRL x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
+ v.reset(OpARMSRL)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Neq32 x y)
+ // match: (Rsh32x16 x y)
// cond:
- // result: (NotEqual (CMP x y))
+ // result: (SRA x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Neq8 x y)
+ // match: (Rsh32x32 x y)
// cond:
- // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ // result: (SRA x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (NeqB x y)
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRAconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SRAconst x [31])
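+ // Arithmetic right shifts by 32 or more are clamped to 31, leaving every bit
+ // equal to the sign bit.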
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AddArg(x)
+ v.AuxInt = 31
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 x y)
// cond:
- // result: (XOR x y)
+ // result: (SRA x y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMXOR)
+ v.reset(OpARMSRA)
v.AddArg(x)
v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (NeqPtr x y)
+ // match: (Rsh8Ux16 x y)
// cond:
- // result: (NotEqual (CMP x y))
+ // result: (SRL (ZeroExt8to32 x) y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v0.AddArg(x)
- v0.AddArg(y)
v.AddArg(v0)
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (NilCheck ptr mem)
+ // match: (Rsh8Ux32 x y)
// cond:
- // result: (LoweredNilCheck ptr mem)
+ // result: (SRL (ZeroExt8to32 x) y)
for {
- ptr := v.Args[0]
- mem := v.Args[1]
- v.reset(OpARMLoweredNilCheck)
- v.AddArg(ptr)
- v.AddArg(mem)
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpNot(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Not x)
- // cond:
- // result: (XORconst [1] x)
+ // match: (Rsh8Ux64 <t> x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRLconst (SLLconst <t> x [24]) [c+24])
for {
+ t := v.Type
x := v.Args[0]
- v.reset(OpARMXORconst)
- v.AuxInt = 1
- v.AddArg(x)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AddArg(x)
+ v0.AuxInt = 24
+ v.AddArg(v0)
+ v.AuxInt = c + 24
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
return true
}
+ return false
}
-func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (OffPtr [off] ptr)
+ // match: (Rsh8Ux8 x y)
// cond:
- // result: (ADD (MOVWconst <config.Frontend().TypeInt32()> [off]) ptr)
+ // result: (SRL (ZeroExt8to32 x) y)
for {
- off := v.AuxInt
- ptr := v.Args[0]
- v.reset(OpARMADD)
- v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.Frontend().TypeInt32())
- v0.AuxInt = off
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
v.AddArg(v0)
- v.AddArg(ptr)
+ v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Or16 x y)
+ // match: (Rsh8x16 x y)
// cond:
- // result: (OR x y)
+ // result: (SRA (SignExt8to32 x) y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMOR)
- v.AddArg(x)
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Or32 x y)
+ // match: (Rsh8x32 x y)
// cond:
- // result: (OR x y)
+ // result: (SRA (SignExt8to32 x) y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMOR)
- v.AddArg(x)
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Or8 x y)
- // cond:
- // result: (OR x y)
+ // match: (Rsh8x64 <t> x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRAconst (SLLconst <t> x [24]) [c+24])
for {
+ t := v.Type
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
- v.AddArg(x)
- v.AddArg(y)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AddArg(x)
+ v0.AuxInt = 24
+ v.AddArg(v0)
+ v.AuxInt = c + 24
+ return true
+ }
+ // match: (Rsh8x64 <t> x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SRAconst (SLLconst <t> x [24]) [31])
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AddArg(x)
+ v0.AuxInt = 24
+ v.AddArg(v0)
+ v.AuxInt = 31
return true
}
+ return false
}
-func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (OrB x y)
+ // match: (Rsh8x8 x y)
// cond:
- // result: (OR x y)
+ // result: (SRA (SignExt8to32 x) y)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMOR)
- v.AddArg(x)
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
v.AddArg(y)
return true
}