((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)
// Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
-(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
+(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (ANDCCconst [1] bool))
// Fold any CR -> GPR -> CR transfers when applying the above rule.
-(ISEL [6] x y (Select1 (ANDCCconst [1] (ISELB [c] one cmp)))) => (ISEL [c] x y cmp)
+(ISEL [6] x y (ANDCCconst [1] (ISELB [c] one cmp))) => (ISEL [c] x y cmp)
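For reference, ISEL's AuxInt encodes which CR bit is tested, and 6 is the not-equal case: ANDCCconst [1] sets CR0 from bit 0 of the materialized boolean, and ISEL [6] then selects x when that bit is 1 and y otherwise. Below is a rough Go analogue of what the lowered sequence computes; the function name is made up for illustration and is not part of the patch.

func iselNEOnBit0(cond bool, x, y int64) int64 {
// ANDCCconst [1] masks bit 0 of the boolean and sets CR0;
// ISEL [6] (the not-equal condition) then picks x when the bit is 1.
if cond {
return x
}
return y
}
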
// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
// small and of zero-extend => either zero-extend or small and
(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF => y
-(ANDconst [0xFF] y:(MOVBreg _)) => y
+(ANDconst [0xFF] (MOVBreg x)) => (MOVBZreg x)
(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF => y
-(ANDconst [0xFFFF] y:(MOVHreg _)) => y
+(ANDconst [0xFFFF] (MOVHreg x)) => (MOVHZreg x)
(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y
(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
// normal case
-(ANDconst [c] (MOV(B|BZ)reg x)) => (ANDconst [c&0xFF] x)
-(ANDconst [c] (MOV(H|HZ)reg x)) => (ANDconst [c&0xFFFF] x)
-(ANDconst [c] (MOV(W|WZ)reg x)) => (ANDconst [c&0xFFFFFFFF] x)
+(ANDconst [c] (MOVBZreg x)) => (ANDconst [c&0xFF] x)
+(ANDconst [c] (MOVHZreg x)) => (ANDconst [c&0xFFFF] x)
+(ANDconst [c] (MOVWZreg x)) => (ANDconst [c&0xFFFFFFFF] x)
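The deleted MOVBreg/MOVHreg/MOVWreg forms of the "normal case" rules were unsound when the mask is wider than the extended field, because the sign extension contributes bits above it (#61297). A worked example, not part of the patch and using the same constants as the codegen test added below:

func maskSignExtended(a int8) uint64 {
// With a = -9 (low byte 0xF7), sign extension yields 0x...FFF7, so the
// correct result is 0x...FFF7 & 0x3F7 == 0x3F7 (1015). The removed rule
// (ANDconst [c] (MOVBreg x)) => (ANDconst [c&0xFF] x) would instead
// compute 0xF7 & 0xF7 == 0xF7 (247), dropping the sign-extension bits.
return uint64(a) & 0x3F7
}
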
// Eliminate unnecessary sign/zero extend following right shift
(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))
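This right-shift rule is sound because the inner zero extension already bounds the value to 8 bits, so a further zero extension after the shift is a no-op. An illustrative Go-level shape (an assumption, not taken from the patch) of the pattern it targets:

func shiftThenExtend(x uint8) uint64 {
// uint32(x) >> 3 is at most 0x1F, so the uint16 conversion (an extra
// zero extension after the shift) is redundant and can be dropped.
return uint64(uint16(uint32(x) >> 3))
}
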
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- typ := &b.Func.Config.Types
// match: (CondSelect x y bool)
// cond: flagArg(bool) == nil
- // result: (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
+ // result: (ISEL [6] x y (ANDCCconst [1] bool))
for {
x := v_0
y := v_1
bool := v_2
if !(flagArg(bool) == nil) {
break
}
v.reset(OpPPC64ISEL)
v.AuxInt = int32ToAuxInt(6)
- v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v1.AuxInt = int64ToAuxInt(1)
- v1.AddArg(bool)
- v0.AddArg(v1)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(bool)
v.AddArg3(x, y, v0)
return true
}
v.copyOf(y)
return true
}
- // match: (ANDconst [0xFF] y:(MOVBreg _))
- // result: y
+ // match: (ANDconst [0xFF] (MOVBreg x))
+ // result: (MOVBZreg x)
for {
- if auxIntToInt64(v.AuxInt) != 0xFF {
+ if auxIntToInt64(v.AuxInt) != 0xFF || v_0.Op != OpPPC64MOVBreg {
break
}
- y := v_0
- if y.Op != OpPPC64MOVBreg {
- break
- }
- v.copyOf(y)
+ x := v_0.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
return true
}
// match: (ANDconst [c] y:(MOVHZreg _))
v.copyOf(y)
return true
}
- // match: (ANDconst [0xFFFF] y:(MOVHreg _))
- // result: y
- for {
- if auxIntToInt64(v.AuxInt) != 0xFFFF {
- break
- }
- y := v_0
- if y.Op != OpPPC64MOVHreg {
- break
- }
- v.copyOf(y)
- return true
- }
- // match: (ANDconst [c] (MOVBreg x))
- // result: (ANDconst [c&0xFF] x)
+ // match: (ANDconst [0xFFFF] (MOVHreg x))
+ // result: (MOVHZreg x)
for {
- c := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpPPC64MOVBreg {
+ if auxIntToInt64(v.AuxInt) != 0xFFFF || v_0.Op != OpPPC64MOVHreg {
break
}
x := v_0.Args[0]
- v.reset(OpPPC64ANDconst)
- v.AuxInt = int64ToAuxInt(c & 0xFF)
+ v.reset(OpPPC64MOVHZreg)
v.AddArg(x)
return true
}
v.AddArg(x)
return true
}
- // match: (ANDconst [c] (MOVHreg x))
- // result: (ANDconst [c&0xFFFF] x)
- for {
- c := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpPPC64MOVHreg {
- break
- }
- x := v_0.Args[0]
- v.reset(OpPPC64ANDconst)
- v.AuxInt = int64ToAuxInt(c & 0xFFFF)
- v.AddArg(x)
- return true
- }
// match: (ANDconst [c] (MOVHZreg x))
// result: (ANDconst [c&0xFFFF] x)
for {
v.AddArg(x)
return true
}
- // match: (ANDconst [c] (MOVWreg x))
- // result: (ANDconst [c&0xFFFFFFFF] x)
- for {
- c := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpPPC64MOVWreg {
- break
- }
- x := v_0.Args[0]
- v.reset(OpPPC64ANDconst)
- v.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
- v.AddArg(x)
- return true
- }
// match: (ANDconst [c] (MOVWZreg x))
// result: (ANDconst [c&0xFFFFFFFF] x)
for {
v.AddArg(y)
return true
}
- // match: (ISEL [6] x y (Select1 (ANDCCconst [1] (ISELB [c] one cmp))))
+ // match: (ISEL [6] x y (ANDCCconst [1] (ISELB [c] one cmp)))
// result: (ISEL [c] x y cmp)
for {
if auxIntToInt32(v.AuxInt) != 6 {
break
}
x := v_0
y := v_1
- if v_2.Op != OpSelect1 {
+ if v_2.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2.AuxInt) != 1 {
break
}
v_2_0 := v_2.Args[0]
- if v_2_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0.AuxInt) != 1 {
- break
- }
- v_2_0_0 := v_2_0.Args[0]
- if v_2_0_0.Op != OpPPC64ISELB {
+ if v_2_0.Op != OpPPC64ISELB {
break
}
- c := auxIntToInt32(v_2_0_0.AuxInt)
- cmp := v_2_0_0.Args[1]
+ c := auxIntToInt32(v_2_0.AuxInt)
+ cmp := v_2_0.Args[1]
v.reset(OpPPC64ISEL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg3(x, y, cmp)
d, borrow := bits.Sub64(x, y, 0)
return x - d&(-borrow)
}
+
+// Verify sign-extended values are not zero-extended under a bit mask (#61297)
+func signextendAndMask8to64(a int8) (s, z uint64) {
+ // ppc64: "MOVB", "ANDCC\t[$]1015,"
+ // ppc64le: "MOVB", "ANDCC\t[$]1015,"
+ s = uint64(a) & 0x3F7
+ // ppc64: -"MOVB", "ANDCC\t[$]247,"
+ // ppc64le: -"MOVB", "ANDCC\t[$]247,"
+ z = uint64(uint8(a)) & 0x3F7
+ return
+
+}
+
+// Verify zero-extended values are not sign-extended under a bit mask (#61297)
+func zeroextendAndMask8to64(a int8, b int16) (x, y uint64) {
+ // ppc64: -"MOVB\t", -"ANDCC", "MOVBZ"
+ // ppc64le: -"MOVB\t", -"ANDCC", "MOVBZ"
+ x = uint64(a) & 0xFF
+ // ppc64: -"MOVH\t", -"ANDCC", "MOVHZ"
+ // ppc64le: -"MOVH\t", -"ANDCC", "MOVHZ"
+ y = uint64(b) & 0xFFFF
+ return
+
+}