// Optimizations
-(ADD (MOVDconst [c]) x) && int64(int32(c)) == c -> (ADDconst [c] x)
-(ADD x (MOVDconst [c])) && int64(int32(c)) == c -> (ADDconst [c] x)
+(ADD (MOVDconst [c]) x) && is32Bit(c) -> (ADDconst [c] x)
+(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
+(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
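// (Note: the new ADDconst fold collapses chained constant adds, e.g.
// (ADDconst [8] (ADDconst [16] x)) becomes (ADDconst [24] x), provided the
// combined constant still satisfies is32Bit.)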
// Fold offsets for stores.
(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} x val mem)
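// For reference, the is16Bit/is32Bit predicates used in these conditions are
// small helpers in the ssa package's rewrite support code; a sketch, not part
// of this diff:
//
//	func is16Bit(n int64) bool { return n == int64(int16(n)) }
//	func is32Bit(n int64) bool { return n == int64(int32(n)) }
//
// So is32Bit(c) is the same test as the int64(int32(c)) == c condition it
// replaces, and the store fold keeps off1+off2 within the 16-bit signed
// displacement that PPC64 load/store encodings accept.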
return rewriteValuePPC64_OpOrB(v, config)
case OpPPC64ADD:
return rewriteValuePPC64_OpPPC64ADD(v, config)
+ case OpPPC64ADDconst:
+ return rewriteValuePPC64_OpPPC64ADDconst(v, config)
case OpPPC64CMPUconst:
return rewriteValuePPC64_OpPPC64CMPUconst(v, config)
case OpPPC64CMPWUconst:
b := v.Block
_ = b
// match: (ADD (MOVDconst [c]) x)
- // cond: int64(int32(c)) == c
+ // cond: is32Bit(c)
// result: (ADDconst [c] x)
for {
		v_0 := v.Args[0]
		if v_0.Op != OpPPC64MOVDconst {
			break
		}
c := v_0.AuxInt
x := v.Args[1]
- if !(int64(int32(c)) == c) {
+ if !(is32Bit(c)) {
break
}
		v.reset(OpPPC64ADDconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
}
// match: (ADD x (MOVDconst [c]))
- // cond: int64(int32(c)) == c
+ // cond: is32Bit(c)
// result: (ADDconst [c] x)
for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpPPC64MOVDconst {
			break
}
c := v_1.AuxInt
- if !(int64(int32(c)) == c) {
+ if !(is32Bit(c)) {
break
}
		v.reset(OpPPC64ADDconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
return false
}
+func rewriteValuePPC64_OpPPC64ADDconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (ADDconst [c+d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = c + d
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64CMPUconst(v *Value, config *Config) bool {
b := v.Block
_ = b