func branchelim(f *Func) {
// FIXME: add support for lowering CondSelects on more architectures
switch f.Config.arch {
- case "arm64", "amd64", "wasm":
+ case "arm64", "ppc64le", "ppc64", "amd64", "wasm":
// implemented
default:
return
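For context, branchelim replaces short, side-effect-free branches with CondSelect values, which only pays off on targets that have a conditional-select instruction (ISEL on ppc64). A minimal sketch of the kind of Go source this affects, not taken from this change:

    func max(a, b int64) int64 {
        r := b
        if a > b { // tiny, side-effect-free arm: a CondSelect candidate
            r = a
        }
        return r
    }

With ppc64 and ppc64le added to the switch above, the branch can compile to a compare plus ISEL rather than a conditional jump.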
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ORCC x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)
-(CondSelect x y bool) && flagArg(bool) != nil => (ISEL [2] x y bool)
-(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [2] x y (CMPWconst [0] bool))
+// Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
+(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (CMPWconst [0] bool))
+// Fold any CR -> GPR -> CR transfers when applying the above rule.
+(ISEL [6] x y (CMPWconst [0] (ISELB [c] one cmp))) => (ISEL [c] x y cmp)
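The ISEL auxInt picks the condition-register bit to test (0=LT, 1=GT, 2=EQ; 4, 5, 6 are their negations, so 6 means "not equal"), making [6] read as "take x when bool is nonzero". A sketch of the value chain the fold collapses, with illustrative names:

    bool = (ISELB [c] one cmp)   // CR bit c materialized as 0/1 in a GPR
    flag = (CMPWconst [0] bool)  // and moved back into the CR
    res  = (ISEL [6] x y flag)   // select on bool != 0

    res  = (ISEL [c] x y cmp)    // after: select directly on the original CR bit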
// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
(NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x)
(NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x)
(NEG (SUB x y)) => (SUB y x)
+(NEG (NEG x)) => x
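The new double-negation rule mirrors the identity -(-x) == x, which holds for every two's-complement value (including MinInt64, whose negation wraps back to itself). Nested NEGs typically arise from earlier rewrites, such as the (NEG (SUB x y)) rule above, rather than from the original source.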
// Use register moves instead of stores and loads to move int<=>float values
// Common with math Float64bits, Float64frombits
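math.Float64bits is the canonical case: it reinterprets a float64's bits as a uint64, so the value only has to cross from a floating-point register to a general-purpose one. Roughly:

    bits := math.Float64bits(f)      // FPR -> GPR: a direct move (MFVSRD), no stack slot
    g := math.Float64frombits(bits)  // GPR -> FPR: the mirror image (MTVSRD)

The direct register moves avoid the store/load round trip through memory that a naive lowering would emit.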
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
// match: (CondSelect x y bool)
- // cond: flagArg(bool) != nil
- // result: (ISEL [2] x y bool)
- for {
- x := v_0
- y := v_1
- bool := v_2
- if !(flagArg(bool) != nil) {
- break
- }
- v.reset(OpPPC64ISEL)
- v.AuxInt = int32ToAuxInt(2)
- v.AddArg3(x, y, bool)
- return true
- }
- // match: (CondSelect x y bool)
// cond: flagArg(bool) == nil
- // result: (ISEL [2] x y (CMPWconst [0] bool))
+ // result: (ISEL [6] x y (CMPWconst [0] bool))
for {
x := v_0
y := v_1
bool := v_2
if !(flagArg(bool) == nil) {
break
}
v.reset(OpPPC64ISEL)
- v.AuxInt = int32ToAuxInt(2)
+ v.AuxInt = int32ToAuxInt(6)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags)
v0.AuxInt = int32ToAuxInt(0)
v0.AddArg(bool)
v.AddArg3(x, y, v0)
return true
}
+ // match: (ISEL [6] x y (CMPWconst [0] (ISELB [c] one cmp)))
+ // result: (ISEL [c] x y cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64ISELB {
+ break
+ }
+ c := auxIntToInt32(v_2_0.AuxInt)
+ cmp := v_2_0.Args[1]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, cmp)
+ return true
+ }
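One detail of the matcher above: the ISELB's first argument (the constant 1, bound as "one" in the rule) is dead once the transfer is folded away, so the generated code only binds Args[1], the original comparison, and reuses it.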
// match: (ISEL [2] x _ (FlagEQ))
// result: x
for {
if auxIntToInt32(v.AuxInt) != 2 || v_2.Op != OpPPC64FlagEQ {
break
}
x := v_0
v.copyOf(x)
return true
}
+ // match: (NEG (NEG x))
+ // result: x
+ for {
+ if v_0.Op != OpPPC64NEG {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64NOR(v *Value) bool {