(ROTL x (MOVDconst [c])) => (ROTLconst x [c&63])
// Combine rotate and mask operations
-(Select0 (ANDCCconst [m] (ROTLWconst [r] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+(ANDconst [m] (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
(AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
-(Select0 (ANDCCconst [m] (ROTLW x r))) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+(ANDconst [m] (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
(AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
// Note, any rotated word bitmask is still a valid word bitmask.
(ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
-(ROTLWconst [r] (Select0 (ANDCCconst [m] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+(ROTLWconst [r] (ANDconst [m] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
-(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
-(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+(ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
+(ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
-(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
-(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+(SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
+(SRWconst (ANDconst [m] x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
((Rsh64U|Lsh64)x64 <t> x y) => (ISEL [0] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
((Rsh64U|Lsh64)x32 <t> x y) => (ISEL [0] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
-((Rsh64U|Lsh64)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
-((Rsh64U|Lsh64)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+((Rsh64U|Lsh64)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFC0] y)))
+((Rsh64U|Lsh64)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00C0] y)))
(Rsh64x(64|32) <t> x y) => (ISEL [0] (SRAD <t> x y) (SRADconst <t> x [63]) (CMP(U|WU)const y [64]))
-(Rsh64x16 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
-(Rsh64x8 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+(Rsh64x16 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0xFFC0] y)))
+(Rsh64x8 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0x00C0] y)))
((Rsh32U|Lsh32)x64 <t> x y) => (ISEL [0] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPUconst y [32]))
((Rsh32U|Lsh32)x32 <t> x y) => (ISEL [0] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPWUconst y [32]))
-((Rsh32U|Lsh32)x16 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
-((Rsh32U|Lsh32)x8 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+((Rsh32U|Lsh32)x16 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFE0] y)))
+((Rsh32U|Lsh32)x8 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00E0] y)))
(Rsh32x(64|32) <t> x y) => (ISEL [0] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMP(U|WU)const y [32]))
-(Rsh32x16 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
-(Rsh32x8 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+(Rsh32x16 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0xFFE0] y)))
+(Rsh32x8 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0x00E0] y)))
((Rsh16U|Lsh16)x64 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [16]))
((Rsh16U|Lsh16)x32 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [16]))
-((Rsh16U|Lsh16)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
-((Rsh16U|Lsh16)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+((Rsh16U|Lsh16)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF0] y)))
+((Rsh16U|Lsh16)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F0] y)))
(Rsh16x(64|32) <t> x y) => (ISEL [0] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMP(U|WU)const y [16]))
-(Rsh16x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
-(Rsh16x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+(Rsh16x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0xFFF0] y)))
+(Rsh16x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0x00F0] y)))
((Rsh8U|Lsh8)x64 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [8]))
((Rsh8U|Lsh8)x32 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [8]))
-((Rsh8U|Lsh8)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
-((Rsh8U|Lsh8)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+((Rsh8U|Lsh8)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF8] y)))
+((Rsh8U|Lsh8)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F8] y)))
(Rsh8x(64|32) <t> x y) => (ISEL [0] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMP(U|WU)const y [8]))
-(Rsh8x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
-(Rsh8x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+(Rsh8x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0xFFF8] y)))
+(Rsh8x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0x00F8] y)))
// Catch bounded shifts in situations like foo<<uint(shift&63) which might not be caught by the prove pass.
-(CMP(U|WU)const [d] (Select0 (ANDCCconst z [c]))) && uint64(d) > uint64(c) => (FlagLT)
+(CMP(U|WU)const [d] (ANDconst z [c])) && uint64(d) > uint64(c) => (FlagLT)
(ORN x (MOVDconst [-1])) => x
(OR x (NOR y y)) => (ORN x y)
// Lowering comparisons
-(EqB x y) => (Select0 <typ.Int> (ANDCCconst [1] (EQV x y)))
+(EqB x y) => (ANDconst [1] (EQV x y))
// Sign extension dependence on operand sign sets up for sign/zero-extension elision later
(Eq(8|16) x y) && x.Type.IsSigned() && y.Type.IsSigned() => (Equal (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
(Eq(8|16) x y) => (Equal (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
(If (FGreaterThan cc) yes no) => (FGT cc yes no)
(If (FGreaterEqual cc) yes no) => (FGE cc yes no)
-(If cond yes no) => (NE (Select1 <types.TypeFlags> (ANDCCconst [1] cond)) yes no)
+(If cond yes no) => (NE (CMPconst [0] (ANDconst [1] cond)) yes no)
// Absorb boolean tests into block
-(NE (Select1 (ANDCCconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no)
-(NE (Select1 (ANDCCconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no)
+(NE (CMPconst [0] (ANDconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no)
+(NE (CMPconst [0] (ANDconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no)
// absorb flag constants into branches
(EQ (FlagEQ) yes no) => (First yes no)
// Elide compares of bit tests
-((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> z) yes no)
-((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> z) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ORCC x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (XORCC x y)) yes no)
(CondSelect x y (SETBC [a] cmp)) => (ISEL [a] x y cmp)
(CondSelect x y (SETBCR [a] cmp)) => (ISEL [a+4] x y cmp)
// Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
-(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
+(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (CMPconst [0] (ANDconst [1] bool)))
// Fold any CR -> GPR -> CR transfers when applying the above rule.
-(ISEL [6] x y (Select1 (ANDCCconst [1] (SETBC [c] cmp)))) => (ISEL [c] x y cmp)
+(ISEL [6] x y (CMPconst [0] (ANDconst [1] (SETBC [c] cmp)))) => (ISEL [c] x y cmp)
(ISEL [6] x y ((CMP|CMPW)const [0] (SETBC [c] cmp))) => (ISEL [c] x y cmp)
(ISEL [6] x y ((CMP|CMPW)const [0] (SETBCR [c] cmp))) => (ISEL [c+4] x y cmp)
// Discover consts
(AND x (MOVDconst [-1])) => x
-(AND x (MOVDconst [c])) && isU16Bit(c) => (Select0 (ANDCCconst [c] x))
+(AND x (MOVDconst [c])) && isU16Bit(c) => (ANDconst [c] x)
(XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
(OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)
// Simplify consts
-(ANDCCconst [c] (Select0 (ANDCCconst [d] x))) => (ANDCCconst [c&d] x)
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
-(Select0 (ANDCCconst [-1] x)) => x
-(Select0 (ANDCCconst [0] _)) => (MOVDconst [0])
-(Select1 (ANDCCconst [0] _)) => (FlagEQ)
+(ANDconst [-1] x) => x
+(ANDconst [0] _) => (MOVDconst [0])
(XORconst [0] x) => x
(ORconst [-1] _) => (MOVDconst [-1])
(ORconst [0] x) => x
// zero-extend of small and => small and
-(MOVBZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFF => y
-(MOVHZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y
-(MOVWZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFFFFFF => y
+(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF => y
+(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y
+(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF => y
(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y
// sign extend of small-positive and => small-positive-and
-(MOVBreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7F => y
-(MOVHreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7FFF => y
-(MOVWreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y // 0xFFFF is largest immediate constant, when regarded as 32-bit is > 0
+(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F => y
+(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF => y
+(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y // 0xFFFF is largest immediate constant, when regarded as 32-bit is > 0
(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y
// small and of zero-extend => either zero-extend or small and
-(Select0 (ANDCCconst [c] y:(MOVBZreg _))) && c&0xFF == 0xFF => y
-(Select0 (ANDCCconst [0xFF] (MOVBreg x))) => (MOVBZreg x)
-(Select0 (ANDCCconst [c] y:(MOVHZreg _))) && c&0xFFFF == 0xFFFF => y
-(Select0 (ANDCCconst [0xFFFF] (MOVHreg x))) => (MOVHZreg x)
+(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF => y
+(ANDconst [0xFF] (MOVBreg x)) => (MOVBZreg x)
+(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF => y
+(ANDconst [0xFFFF] (MOVHreg x)) => (MOVHZreg x)
(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y
(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
// normal case
-(Select0 (ANDCCconst [c] (MOVBZreg x))) => (Select0 (ANDCCconst [c&0xFF] x))
-(Select0 (ANDCCconst [c] (MOVHZreg x))) => (Select0 (ANDCCconst [c&0xFFFF] x))
-(Select0 (ANDCCconst [c] (MOVWZreg x))) => (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
+(ANDconst [c] (MOVBZreg x)) => (ANDconst [c&0xFF] x)
+(ANDconst [c] (MOVHZreg x)) => (ANDconst [c&0xFFFF] x)
+(ANDconst [c] (MOVWZreg x)) => (ANDconst [c&0xFFFFFFFF] x)
// Eliminate unnecessary sign/zero extend following right shift
(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))
(MOVBZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFF,r)] y)
(MOVHZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFFFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFFFF,r)] y)
(MOVWZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFFFFFFFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFFFFFFFF,r)] y)
-(Select0 (ANDCCconst [m] (RLWINM [r] y))) && mergePPC64AndRlwinm(uint32(m),r) != 0 => (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y)
+(ANDconst [m] (RLWINM [r] y)) && mergePPC64AndRlwinm(uint32(m),r) != 0 => (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y)
(SLDconst [s] (RLWINM [r] y)) && mergePPC64SldiRlwinm(s,r) != 0 => (RLWINM [mergePPC64SldiRlwinm(s,r)] y)
(RLWINM [r] (MOVHZreg u)) && mergePPC64RlwinmAnd(r,0xFFFF) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,0xFFFF)] u)
-(RLWINM [r] (Select0 (ANDCCconst [a] u))) && mergePPC64RlwinmAnd(r,uint32(a)) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u)
+(RLWINM [r] (ANDconst [a] u)) && mergePPC64RlwinmAnd(r,uint32(a)) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u)
// SLWconst is a special case of RLWNM which always zero-extends the result.
(SLWconst [s] (MOVWZreg w)) => (SLWconst [s] w)
(MOVWZreg w:(SLWconst u)) => w
(MOVBZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
(MOVBZreg ((OR|XOR|AND) <t> x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
-(MOV(B|H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) => z
+(MOV(B|H|W)Zreg z:(ANDconst [c] (MOVBZload ptr x))) => z
(MOV(B|H|W)Zreg z:(AND y (MOV(B|H|W)Zload ptr x))) => z
-(MOV(H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) => z
-(MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x)))) => z
+(MOV(H|W)Zreg z:(ANDconst [c] (MOVHZload ptr x))) => z
+(MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) => z
// Arithmetic constant ops
(AtomicOr(8|32) ...) => (LoweredAtomicOr(8|32) ...)
(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
-(Select0 (ANDCCconst [1] z:(SRADconst [63] x))) && z.Uses == 1 => (SRDconst [63] x)
+(ANDconst [1] z:(SRADconst [63] x)) && z.Uses == 1 => (SRDconst [63] x)
// Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
// This may interact with other patterns in the future. (Compare with arm64)
(SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
(SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
-(SLDconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
(SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
(SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
-(SLWconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
// special case for power9
(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
-// n is always a zero-extended uint16 value, so n & z is always a non-negative 32 or 64 bit value. Use the flag result of ANDCCconst.
-((CMP|CMPW|CMPU|CMPWU)const [0] (Select0 a:(ANDCCconst [n] z))) => (Select1 <types.TypeFlags> a)
+// n is always a zero-extended uint16 value, so n & z is always a non-negative 32 or 64 bit value.
+// Rewrite to a cmp int64(0) to lower into ANDCCconst in the latelower pass.
+(CMP(W|U|WU)const [0] a:(ANDconst [n] z)) => (CMPconst [0] a)
// SETBC auxInt values 0=LT 1=GT 2=EQ Crbit==1 ? 1 : 0
// SETBCR auxInt values 0=LT 1=GT 2=EQ Crbit==1 ? 0 : 1
(XORconst [1] (SETBCR [n] cmp)) => (SETBC [n] cmp)
(XORconst [1] (SETBC [n] cmp)) => (SETBCR [n] cmp)
-(SETBC [2] (Select1 a:(ANDCCconst <t> [1] _))) => (XORconst [1] (Select0 <t.FieldType(0)> a))
-(SETBCR [2] (Select1 a:(ANDCCconst [1] _))) => (Select0 a)
+(SETBC [2] (CMPconst [0] a:(ANDconst [1] _))) => (XORconst [1] a)
+(SETBCR [2] (CMPconst [0] a:(ANDconst [1] _))) => a
// Only CMPconst for these in case AND|OR|XOR result is > 32 bits
(SETBC [2] (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (SETBC [2] (Select1 <types.TypeFlags> (ANDCC y z )))
(SETBCR [2] (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (SETBCR [2] (Select1 <types.TypeFlags> (XORCC y z )))
// A particular pattern seen in cgo code:
-(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (Select0 (ANDCCconst [c&0xFF] x))
+(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x)
// floating point negative abs
(FNEG (F(ABS|NABS) x)) => (F(NABS|ABS) x)
return rewriteValuePPC64_OpPPC64ADDconst(v)
case OpPPC64AND:
return rewriteValuePPC64_OpPPC64AND(v)
- case OpPPC64ANDCCconst:
- return rewriteValuePPC64_OpPPC64ANDCCconst(v)
case OpPPC64ANDN:
return rewriteValuePPC64_OpPPC64ANDN(v)
+ case OpPPC64ANDconst:
+ return rewriteValuePPC64_OpPPC64ANDconst(v)
case OpPPC64BRD:
return rewriteValuePPC64_OpPPC64BRD(v)
case OpPPC64BRH:
}
// match: (CondSelect x y bool)
// cond: flagArg(bool) == nil
- // result: (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
+ // result: (ISEL [6] x y (CMPconst [0] (ANDconst [1] bool)))
for {
x := v_0
y := v_1
}
v.reset(OpPPC64ISEL)
v.AuxInt = int32ToAuxInt(6)
- v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v1.AuxInt = int64ToAuxInt(1)
v1.AddArg(bool)
v0.AddArg(v1)
b := v.Block
typ := &b.Func.Config.Types
// match: (EqB x y)
- // result: (Select0 <typ.Int> (ANDCCconst [1] (EQV x y)))
+ // result: (ANDconst [1] (EQV x y))
for {
x := v_0
y := v_1
- v.reset(OpSelect0)
- v.Type = typ.Int
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(1)
- v1 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64)
- v1.AddArg2(x, y)
- v0.AddArg(v1)
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64)
+ v0.AddArg2(x, y)
v.AddArg(v0)
return true
}
return true
}
// match: (Lsh16x16 <t> x y)
- // result: (ISEL [2] (SLD <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
+ // result: (ISEL [2] (SLD <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF0] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0xFFF0)
v4.AddArg(y)
v3.AddArg(v4)
return true
}
// match: (Lsh16x8 <t> x y)
- // result: (ISEL [2] (SLD <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+ // result: (ISEL [2] (SLD <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F0] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0x00F0)
v4.AddArg(y)
v3.AddArg(v4)
return true
}
// match: (Lsh32x16 <t> x y)
- // result: (ISEL [2] (SLW <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
+ // result: (ISEL [2] (SLW <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFE0] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0xFFE0)
v3.AddArg(y)
v2.AddArg(v3)
return true
}
// match: (Lsh32x8 <t> x y)
- // result: (ISEL [2] (SLW <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+ // result: (ISEL [2] (SLW <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00E0] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0x00E0)
v3.AddArg(y)
v2.AddArg(v3)
return true
}
// match: (Lsh64x16 <t> x y)
- // result: (ISEL [2] (SLD <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
+ // result: (ISEL [2] (SLD <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFC0] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0xFFC0)
v3.AddArg(y)
v2.AddArg(v3)
return true
}
// match: (Lsh64x8 <t> x y)
- // result: (ISEL [2] (SLD <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+ // result: (ISEL [2] (SLD <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00C0] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0x00C0)
v3.AddArg(y)
v2.AddArg(v3)
return true
}
// match: (Lsh8x16 <t> x y)
- // result: (ISEL [2] (SLD <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
+ // result: (ISEL [2] (SLD <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF8] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0xFFF8)
v4.AddArg(y)
v3.AddArg(v4)
return true
}
// match: (Lsh8x8 <t> x y)
- // result: (ISEL [2] (SLD <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+ // result: (ISEL [2] (SLD <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F8] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0x00F8)
v4.AddArg(y)
v3.AddArg(v4)
func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
// match: (AND (MOVDconst [m]) (ROTLWconst [r] x))
// cond: isPPC64WordRotateMask(m)
// result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
}
// match: (AND x (MOVDconst [c]))
// cond: isU16Bit(c)
- // result: (Select0 (ANDCCconst [c] x))
+ // result: (ANDconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if !(isU16Bit(c)) {
continue
}
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(c)
- v0.AddArg(x)
- v.AddArg(v0)
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
return true
}
break
break
}
// match: (AND (MOVDconst [c]) x:(MOVBZload _ _))
- // result: (Select0 (ANDCCconst [c&0xFF] x))
+ // result: (ANDconst [c&0xFF] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64MOVDconst {
if x.Op != OpPPC64MOVBZload {
continue
}
- v.reset(OpSelect0)
- v0 := b.NewValue0(x.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(c & 0xFF)
- v0.AddArg(x)
- v.AddArg(v0)
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
+ v.AddArg(x)
return true
}
break
}
return false
}
-func rewriteValuePPC64_OpPPC64ANDCCconst(v *Value) bool {
+func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool {
+ v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (ANDCCconst [c] (Select0 (ANDCCconst [d] x)))
- // result: (ANDCCconst [c&d] x)
+ // match: (ANDN (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c&^d])
for {
- c := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpSelect0 {
+ if v_0.Op != OpPPC64MOVDconst {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
break
}
- d := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_0.Args[0]
- v.reset(OpPPC64ANDCCconst)
- v.AuxInt = int64ToAuxInt(c & d)
- v.AddArg(x)
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c &^ d)
return true
}
return false
}
-func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool {
- v_1 := v.Args[1]
+func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (ANDN (MOVDconst [c]) (MOVDconst [d]))
- // result: (MOVDconst [c&^d])
+ // match: (ANDconst [m] (ROTLWconst [r] x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
for {
- if v_0.Op != OpPPC64MOVDconst {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ROTLWconst {
break
}
- c := auxIntToInt64(v_0.AuxInt)
- if v_1.Op != OpPPC64MOVDconst {
+ r := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [m] (ROTLW x r))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ROTLW {
+ break
+ }
+ r := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWNM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+ v.AddArg2(x, r)
+ return true
+ }
+ // match: (ANDconst [m] (SRWconst x [s]))
+ // cond: mergePPC64RShiftMask(m,s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ if !(mergePPC64RShiftMask(m, s, 32) == 0) {
break
}
- d := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = int64ToAuxInt(c &^ d)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [m] (SRWconst x [s]))
+ // cond: mergePPC64AndSrwi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64AndSrwi(m, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [0] _)
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [c] y:(MOVBZreg _))
+ // cond: c&0xFF == 0xFF
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ANDconst [0xFF] (MOVBreg x))
+ // result: (MOVBZreg x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0xFF || v_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] y:(MOVHZreg _))
+ // cond: c&0xFFFF == 0xFFFF
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ANDconst [0xFFFF] (MOVHreg x))
+ // result: (MOVHZreg x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0xFFFF || v_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVBZreg x))
+ // result: (ANDconst [c&0xFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVHZreg x))
+ // result: (ANDconst [c&0xFFFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFFFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWZreg x))
+ // result: (ANDconst [c&0xFFFFFFFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [m] (RLWINM [r] y))
+ // cond: mergePPC64AndRlwinm(uint32(m),r) != 0
+ // result: (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64RLWINM {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if !(mergePPC64AndRlwinm(uint32(m), r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndRlwinm(uint32(m), r))
+ v.AddArg(y)
+ return true
+ }
+ // match: (ANDconst [1] z:(SRADconst [63] x))
+ // cond: z.Uses == 1
+ // result: (SRDconst [63] x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64SRADconst || auxIntToInt64(z.AuxInt) != 63 {
+ break
+ }
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64CMPUconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (CMPUconst [d] (Select0 (ANDCCconst z [c])))
+ // match: (CMPUconst [d] (ANDconst z [c]))
// cond: uint64(d) > uint64(c)
// result: (FlagLT)
for {
d := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpSelect0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
+ if v_0.Op != OpPPC64ANDconst {
break
}
- c := auxIntToInt64(v_0_0.AuxInt)
+ c := auxIntToInt64(v_0.AuxInt)
if !(uint64(d) > uint64(c)) {
break
}
		v.reset(OpPPC64FlagLT)
return true
}
- // match: (CMPUconst [0] (Select0 a:(ANDCCconst [n] z)))
- // result: (Select1 <types.TypeFlags> a)
+ // match: (CMPUconst [0] a:(ANDconst [n] z))
+ // result: (CMPconst [0] a)
for {
- if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
- a := v_0.Args[0]
- if a.Op != OpPPC64ANDCCconst {
+ a := v_0
+ if a.Op != OpPPC64ANDconst {
break
}
- v.reset(OpSelect1)
- v.Type = types.TypeFlags
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
v.AddArg(a)
return true
}
}
func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (CMPWUconst [d] (Select0 (ANDCCconst z [c])))
+ // match: (CMPWUconst [d] (ANDconst z [c]))
// cond: uint64(d) > uint64(c)
// result: (FlagLT)
for {
d := auxIntToInt32(v.AuxInt)
- if v_0.Op != OpSelect0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
+ if v_0.Op != OpPPC64ANDconst {
break
}
- c := auxIntToInt64(v_0_0.AuxInt)
+ c := auxIntToInt64(v_0.AuxInt)
if !(uint64(d) > uint64(c)) {
break
}
		v.reset(OpPPC64FlagLT)
return true
}
- // match: (CMPWUconst [0] (Select0 a:(ANDCCconst [n] z)))
- // result: (Select1 <types.TypeFlags> a)
+ // match: (CMPWUconst [0] a:(ANDconst [n] z))
+ // result: (CMPconst [0] a)
for {
- if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
- a := v_0.Args[0]
- if a.Op != OpPPC64ANDCCconst {
+ a := v_0
+ if a.Op != OpPPC64ANDconst {
break
}
- v.reset(OpSelect1)
- v.Type = types.TypeFlags
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
v.AddArg(a)
return true
}
v.reset(OpPPC64FlagGT)
return true
}
- // match: (CMPWconst [0] (Select0 a:(ANDCCconst [n] z)))
- // result: (Select1 <types.TypeFlags> a)
+ // match: (CMPWconst [0] a:(ANDconst [n] z))
+ // result: (CMPconst [0] a)
for {
- if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
- a := v_0.Args[0]
- if a.Op != OpPPC64ANDCCconst {
+ a := v_0
+ if a.Op != OpPPC64ANDconst {
break
}
- v.reset(OpSelect1)
- v.Type = types.TypeFlags
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
v.AddArg(a)
return true
}
v.reset(OpPPC64FlagGT)
return true
}
- // match: (CMPconst [0] (Select0 a:(ANDCCconst [n] z)))
- // result: (Select1 <types.TypeFlags> a)
- for {
- if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
- break
- }
- a := v_0.Args[0]
- if a.Op != OpPPC64ANDCCconst {
- break
- }
- v.reset(OpSelect1)
- v.Type = types.TypeFlags
- v.AddArg(a)
- return true
- }
return false
}
func rewriteValuePPC64_OpPPC64Equal(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (ISEL [6] x y (Select1 (ANDCCconst [1] (SETBC [c] cmp))))
+ // match: (ISEL [6] x y (CMPconst [0] (ANDconst [1] (SETBC [c] cmp))))
// result: (ISEL [c] x y cmp)
for {
if auxIntToInt32(v.AuxInt) != 6 {
}
x := v_0
y := v_1
- if v_2.Op != OpSelect1 {
+ if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 {
break
}
v_2_0 := v_2.Args[0]
- if v_2_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0.AuxInt) != 1 {
+ if v_2_0.Op != OpPPC64ANDconst || auxIntToInt64(v_2_0.AuxInt) != 1 {
break
}
v_2_0_0 := v_2_0.Args[0]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (MOVBZreg y:(Select0 (ANDCCconst [c] _)))
+ // match: (MOVBZreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFF
// result: y
for {
y := v_0
- if y.Op != OpSelect0 {
+ if y.Op != OpPPC64ANDconst {
break
}
- y_0 := y.Args[0]
- if y_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(y_0.AuxInt)
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0xFF) {
break
}
}
break
}
- // match: (MOVBZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x))))
+ // match: (MOVBZreg z:(ANDconst [c] (MOVBZload ptr x)))
// result: z
for {
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- z_0_0 := z_0.Args[0]
- if z_0_0.Op != OpPPC64MOVBZload {
+ if z_0.Op != OpPPC64MOVBZload {
break
}
v.copyOf(z)
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (MOVBreg y:(Select0 (ANDCCconst [c] _)))
+ // match: (MOVBreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0x7F
// result: y
for {
y := v_0
- if y.Op != OpSelect0 {
+ if y.Op != OpPPC64ANDconst {
break
}
- y_0 := y.Args[0]
- if y_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(y_0.AuxInt)
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0x7F) {
break
}
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (MOVHZreg y:(Select0 (ANDCCconst [c] _)))
+ // match: (MOVHZreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFFFF
// result: y
for {
y := v_0
- if y.Op != OpSelect0 {
+ if y.Op != OpPPC64ANDconst {
break
}
- y_0 := y.Args[0]
- if y_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(y_0.AuxInt)
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0xFFFF) {
break
}
}
break
}
- // match: (MOVHZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x))))
+ // match: (MOVHZreg z:(ANDconst [c] (MOVBZload ptr x)))
// result: z
for {
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- z_0_0 := z_0.Args[0]
- if z_0_0.Op != OpPPC64MOVBZload {
+ if z_0.Op != OpPPC64MOVBZload {
break
}
v.copyOf(z)
}
break
}
- // match: (MOVHZreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x))))
+ // match: (MOVHZreg z:(ANDconst [c] (MOVHZload ptr x)))
// result: z
for {
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- z_0_0 := z_0.Args[0]
- if z_0_0.Op != OpPPC64MOVHZload {
+ if z_0.Op != OpPPC64MOVHZload {
break
}
v.copyOf(z)
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (MOVHreg y:(Select0 (ANDCCconst [c] _)))
+ // match: (MOVHreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0x7FFF
// result: y
for {
y := v_0
- if y.Op != OpSelect0 {
- break
- }
- y_0 := y.Args[0]
- if y_0.Op != OpPPC64ANDCCconst {
+ if y.Op != OpPPC64ANDconst {
break
}
- c := auxIntToInt64(y_0.AuxInt)
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0x7FFF) {
break
}
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (MOVWZreg y:(Select0 (ANDCCconst [c] _)))
+ // match: (MOVWZreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFFFFFFFF
// result: y
for {
y := v_0
- if y.Op != OpSelect0 {
- break
- }
- y_0 := y.Args[0]
- if y_0.Op != OpPPC64ANDCCconst {
+ if y.Op != OpPPC64ANDconst {
break
}
- c := auxIntToInt64(y_0.AuxInt)
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0xFFFFFFFF) {
break
}
}
break
}
- // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x))))
+ // match: (MOVWZreg z:(ANDconst [c] (MOVBZload ptr x)))
// result: z
for {
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- z_0_0 := z_0.Args[0]
- if z_0_0.Op != OpPPC64MOVBZload {
+ if z_0.Op != OpPPC64MOVBZload {
break
}
v.copyOf(z)
}
break
}
- // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x))))
+ // match: (MOVWZreg z:(ANDconst [c] (MOVHZload ptr x)))
// result: z
for {
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- z_0_0 := z_0.Args[0]
- if z_0_0.Op != OpPPC64MOVHZload {
+ if z_0.Op != OpPPC64MOVHZload {
break
}
v.copyOf(z)
return true
}
- // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x))))
+ // match: (MOVWZreg z:(ANDconst [c] (MOVWZload ptr x)))
// result: z
for {
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- z_0_0 := z_0.Args[0]
- if z_0_0.Op != OpPPC64MOVWZload {
+ if z_0.Op != OpPPC64MOVWZload {
break
}
v.copyOf(z)
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (MOVWreg y:(Select0 (ANDCCconst [c] _)))
+ // match: (MOVWreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFFFF
// result: y
for {
y := v_0
- if y.Op != OpSelect0 {
+ if y.Op != OpPPC64ANDconst {
break
}
- y_0 := y.Args[0]
- if y_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(y_0.AuxInt)
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0xFFFF) {
break
}
v.AddArg(u)
return true
}
- // match: (RLWINM [r] (Select0 (ANDCCconst [a] u)))
+ // match: (RLWINM [r] (ANDconst [a] u))
// cond: mergePPC64RlwinmAnd(r,uint32(a)) != 0
// result: (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u)
for {
r := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpSelect0 {
+ if v_0.Op != OpPPC64ANDconst {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- a := auxIntToInt64(v_0_0.AuxInt)
- u := v_0_0.Args[0]
+ a := auxIntToInt64(v_0.AuxInt)
+ u := v_0.Args[0]
if !(mergePPC64RlwinmAnd(r, uint32(a)) != 0) {
break
}
}
break
}
- // match: (ROTLWconst [r] (Select0 (ANDCCconst [m] x)))
+ // match: (ROTLWconst [r] (ANDconst [m] x))
// cond: isPPC64WordRotateMask(m)
// result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
for {
r := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpSelect0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
+ if v_0.Op != OpPPC64ANDconst {
break
}
- m := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_0.Args[0]
+ m := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
if !(isPPC64WordRotateMask(m)) {
break
}
v.AddArg(bool)
return true
}
- // match: (SETBC [2] (Select1 a:(ANDCCconst <t> [1] _)))
- // result: (XORconst [1] (Select0 <t.FieldType(0)> a))
+ // match: (SETBC [2] (CMPconst [0] a:(ANDconst [1] _)))
+ // result: (XORconst [1] a)
for {
- if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpSelect1 {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
break
}
a := v_0.Args[0]
- if a.Op != OpPPC64ANDCCconst {
- break
- }
- t := a.Type
- if auxIntToInt64(a.AuxInt) != 1 {
+ if a.Op != OpPPC64ANDconst || auxIntToInt64(a.AuxInt) != 1 {
break
}
v.reset(OpPPC64XORconst)
v.AuxInt = int64ToAuxInt(1)
- v0 := b.NewValue0(v.Pos, OpSelect0, t.FieldType(0))
- v0.AddArg(a)
- v.AddArg(v0)
+ v.AddArg(a)
return true
}
// match: (SETBC [2] (CMPconst [0] a:(AND y z)))
v.AddArg(bool)
return true
}
- // match: (SETBCR [2] (Select1 a:(ANDCCconst [1] _)))
- // result: (Select0 a)
+ // match: (SETBCR [2] (CMPconst [0] a:(ANDconst [1] _)))
+ // result: a
for {
- if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpSelect1 {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
break
}
a := v_0.Args[0]
- if a.Op != OpPPC64ANDCCconst || auxIntToInt64(a.AuxInt) != 1 {
+ if a.Op != OpPPC64ANDconst || auxIntToInt64(a.AuxInt) != 1 {
break
}
- v.reset(OpSelect0)
- v.AddArg(a)
+ v.copyOf(a)
return true
}
// match: (SETBCR [2] (CMPconst [0] a:(AND y z)))
v.AddArg(x)
return true
}
- // match: (SLDconst [c] z:(Select0 (ANDCCconst [d] x)))
+ // match: (SLDconst [c] z:(ANDconst [d] x))
// cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))
// result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
for {
c := auxIntToInt64(v.AuxInt)
z := v_0
- if z.Op != OpSelect0 {
+ if z.Op != OpPPC64ANDconst {
break
}
- z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
- break
- }
- d := auxIntToInt64(z_0.AuxInt)
- x := z_0.Args[0]
+ d := auxIntToInt64(z.AuxInt)
+ x := z.Args[0]
if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) {
break
}
v.AddArg(x)
return true
}
- // match: (SLWconst [c] z:(Select0 (ANDCCconst [d] x)))
+ // match: (SLWconst [c] z:(ANDconst [d] x))
// cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d))
// result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
for {
c := auxIntToInt64(v.AuxInt)
z := v_0
- if z.Op != OpSelect0 {
- break
- }
- z_0 := z.Args[0]
- if z_0.Op != OpPPC64ANDCCconst {
+ if z.Op != OpPPC64ANDconst {
break
}
- d := auxIntToInt64(z_0.AuxInt)
- x := z_0.Args[0]
+ d := auxIntToInt64(z.AuxInt)
+ x := z.Args[0]
if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) {
break
}
}
func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (SRWconst (Select0 (ANDCCconst [m] x)) [s])
+ // match: (SRWconst (ANDconst [m] x) [s])
// cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0
// result: (MOVDconst [0])
for {
s := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpSelect0 {
+ if v_0.Op != OpPPC64ANDconst {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(v_0_0.AuxInt)
+ m := auxIntToInt64(v_0.AuxInt)
if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) {
break
}
v.AuxInt = int64ToAuxInt(0)
return true
}
- // match: (SRWconst (Select0 (ANDCCconst [m] x)) [s])
+ // match: (SRWconst (ANDconst [m] x) [s])
// cond: mergePPC64AndSrwi(m>>uint(s),s) != 0
// result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
for {
s := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpSelect0 {
+ if v_0.Op != OpPPC64ANDconst {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_0.Args[0]
+ m := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) {
break
}
return true
}
// match: (Rsh16Ux16 <t> x y)
- // result: (ISEL [2] (SRD <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
+ // result: (ISEL [2] (SRD <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF0] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0xFFF0)
v4.AddArg(y)
v3.AddArg(v4)
return true
}
// match: (Rsh16Ux8 <t> x y)
- // result: (ISEL [2] (SRD <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+ // result: (ISEL [2] (SRD <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F0] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0x00F0)
v4.AddArg(y)
v3.AddArg(v4)
return true
}
// match: (Rsh16x16 <t> x y)
- // result: (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
+ // result: (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0xFFF0] y)))
for {
t := v.Type
x := v_0
v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
v2.AuxInt = int64ToAuxInt(15)
v2.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0xFFF0)
v4.AddArg(y)
v3.AddArg(v4)
return true
}
// match: (Rsh16x8 <t> x y)
- // result: (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+ // result: (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0x00F0] y)))
for {
t := v.Type
x := v_0
v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
v2.AuxInt = int64ToAuxInt(15)
v2.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0x00F0)
v4.AddArg(y)
v3.AddArg(v4)
return true
}
// match: (Rsh32Ux16 <t> x y)
- // result: (ISEL [2] (SRW <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
+ // result: (ISEL [2] (SRW <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFE0] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0xFFE0)
v3.AddArg(y)
v2.AddArg(v3)
return true
}
// match: (Rsh32Ux8 <t> x y)
- // result: (ISEL [2] (SRW <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+ // result: (ISEL [2] (SRW <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00E0] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0x00E0)
v3.AddArg(y)
v2.AddArg(v3)
return true
}
// match: (Rsh32x16 <t> x y)
- // result: (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
+ // result: (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0xFFE0] y)))
for {
t := v.Type
x := v_0
v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t)
v1.AuxInt = int64ToAuxInt(31)
v1.AddArg(x)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0xFFE0)
v3.AddArg(y)
v2.AddArg(v3)
return true
}
// match: (Rsh32x8 <t> x y)
- // result: (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+ // result: (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0x00E0] y)))
for {
t := v.Type
x := v_0
v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t)
v1.AuxInt = int64ToAuxInt(31)
v1.AddArg(x)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0x00E0)
v3.AddArg(y)
v2.AddArg(v3)
return true
}
// match: (Rsh64Ux16 <t> x y)
- // result: (ISEL [2] (SRD <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
+ // result: (ISEL [2] (SRD <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFC0] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0xFFC0)
v3.AddArg(y)
v2.AddArg(v3)
return true
}
// match: (Rsh64Ux8 <t> x y)
- // result: (ISEL [2] (SRD <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+ // result: (ISEL [2] (SRD <t> x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00C0] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v1.AuxInt = int64ToAuxInt(0)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0x00C0)
v3.AddArg(y)
v2.AddArg(v3)
return true
}
// match: (Rsh64x16 <t> x y)
- // result: (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
+ // result: (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0xFFC0] y)))
for {
t := v.Type
x := v_0
v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
v1.AuxInt = int64ToAuxInt(63)
v1.AddArg(x)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0xFFC0)
v3.AddArg(y)
v2.AddArg(v3)
return true
}
// match: (Rsh64x8 <t> x y)
- // result: (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+ // result: (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0x00C0] y)))
for {
t := v.Type
x := v_0
v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
v1.AuxInt = int64ToAuxInt(63)
v1.AddArg(x)
- v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v3.AuxInt = int64ToAuxInt(0x00C0)
v3.AddArg(y)
v2.AddArg(v3)
return true
}
// match: (Rsh8Ux16 <t> x y)
- // result: (ISEL [2] (SRD <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
+ // result: (ISEL [2] (SRD <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF8] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0xFFF8)
v4.AddArg(y)
v3.AddArg(v4)
return true
}
// match: (Rsh8Ux8 <t> x y)
- // result: (ISEL [2] (SRD <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+ // result: (ISEL [2] (SRD <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F8] y)))
for {
t := v.Type
x := v_0
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v2.AuxInt = int64ToAuxInt(0)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0x00F8)
v4.AddArg(y)
v3.AddArg(v4)
return true
}
// match: (Rsh8x16 <t> x y)
- // result: (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
+ // result: (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0xFFF8] y)))
for {
t := v.Type
x := v_0
v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
v2.AuxInt = int64ToAuxInt(7)
v2.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0xFFF8)
v4.AddArg(y)
v3.AddArg(v4)
return true
}
// match: (Rsh8x8 <t> x y)
- // result: (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+ // result: (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0x00F8] y)))
for {
t := v.Type
x := v_0
v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
v2.AuxInt = int64ToAuxInt(7)
v2.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int)
v4.AuxInt = int64ToAuxInt(0x00F8)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v0)
return true
}
- // match: (Select0 (ANDCCconst [m] (ROTLWconst [r] x)))
- // cond: isPPC64WordRotateMask(m)
- // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ROTLWconst {
- break
- }
- r := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_0.Args[0]
- if !(isPPC64WordRotateMask(m)) {
- break
- }
- v.reset(OpPPC64RLWINM)
- v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
- v.AddArg(x)
- return true
- }
- // match: (Select0 (ANDCCconst [m] (ROTLW x r)))
- // cond: isPPC64WordRotateMask(m)
- // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ROTLW {
- break
- }
- r := v_0_0.Args[1]
- x := v_0_0.Args[0]
- if !(isPPC64WordRotateMask(m)) {
- break
- }
- v.reset(OpPPC64RLWNM)
- v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
- v.AddArg2(x, r)
- return true
- }
- // match: (Select0 (ANDCCconst [m] (SRWconst x [s])))
- // cond: mergePPC64RShiftMask(m,s,32) == 0
- // result: (MOVDconst [0])
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64SRWconst {
- break
- }
- s := auxIntToInt64(v_0_0.AuxInt)
- if !(mergePPC64RShiftMask(m, s, 32) == 0) {
- break
- }
- v.reset(OpPPC64MOVDconst)
- v.AuxInt = int64ToAuxInt(0)
- return true
- }
- // match: (Select0 (ANDCCconst [m] (SRWconst x [s])))
- // cond: mergePPC64AndSrwi(m,s) != 0
- // result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64SRWconst {
- break
- }
- s := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_0.Args[0]
- if !(mergePPC64AndSrwi(m, s) != 0) {
- break
- }
- v.reset(OpPPC64RLWINM)
- v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
- v.AddArg(x)
- return true
- }
- // match: (Select0 (ANDCCconst [-1] x))
- // result: x
- for {
- if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != -1 {
- break
- }
- x := v_0.Args[0]
- v.copyOf(x)
- return true
- }
- // match: (Select0 (ANDCCconst [0] _))
- // result: (MOVDconst [0])
- for {
- if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v.reset(OpPPC64MOVDconst)
- v.AuxInt = int64ToAuxInt(0)
- return true
- }
- // match: (Select0 (ANDCCconst [c] y:(MOVBZreg _)))
- // cond: c&0xFF == 0xFF
- // result: y
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0.AuxInt)
- y := v_0.Args[0]
- if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) {
- break
- }
- v.copyOf(y)
- return true
- }
- // match: (Select0 (ANDCCconst [0xFF] (MOVBreg x)))
- // result: (MOVBZreg x)
- for {
- if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFF {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64MOVBreg {
- break
- }
- x := v_0_0.Args[0]
- v.reset(OpPPC64MOVBZreg)
- v.AddArg(x)
- return true
- }
- // match: (Select0 (ANDCCconst [c] y:(MOVHZreg _)))
- // cond: c&0xFFFF == 0xFFFF
- // result: y
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0.AuxInt)
- y := v_0.Args[0]
- if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) {
- break
- }
- v.copyOf(y)
- return true
- }
- // match: (Select0 (ANDCCconst [0xFFFF] (MOVHreg x)))
- // result: (MOVHZreg x)
- for {
- if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFFFF {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64MOVHreg {
- break
- }
- x := v_0_0.Args[0]
- v.reset(OpPPC64MOVHZreg)
- v.AddArg(x)
- return true
- }
- // match: (Select0 (ANDCCconst [c] (MOVBZreg x)))
- // result: (Select0 (ANDCCconst [c&0xFF] x))
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64MOVBZreg {
- break
- }
- x := v_0_0.Args[0]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(c & 0xFF)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
- // match: (Select0 (ANDCCconst [c] (MOVHZreg x)))
- // result: (Select0 (ANDCCconst [c&0xFFFF] x))
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64MOVHZreg {
- break
- }
- x := v_0_0.Args[0]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(c & 0xFFFF)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
- // match: (Select0 (ANDCCconst [c] (MOVWZreg x)))
- // result: (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- c := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64MOVWZreg {
- break
- }
- x := v_0_0.Args[0]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
- v0.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
- // match: (Select0 (ANDCCconst [m] (RLWINM [r] y)))
- // cond: mergePPC64AndRlwinm(uint32(m),r) != 0
- // result: (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y)
- for {
- if v_0.Op != OpPPC64ANDCCconst {
- break
- }
- m := auxIntToInt64(v_0.AuxInt)
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64RLWINM {
- break
- }
- r := auxIntToInt64(v_0_0.AuxInt)
- y := v_0_0.Args[0]
- if !(mergePPC64AndRlwinm(uint32(m), r) != 0) {
- break
- }
- v.reset(OpPPC64RLWINM)
- v.AuxInt = int64ToAuxInt(mergePPC64AndRlwinm(uint32(m), r))
- v.AddArg(y)
- return true
- }
- // match: (Select0 (ANDCCconst [1] z:(SRADconst [63] x)))
- // cond: z.Uses == 1
- // result: (SRDconst [63] x)
- for {
- if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 1 {
- break
- }
- z := v_0.Args[0]
- if z.Op != OpPPC64SRADconst || auxIntToInt64(z.AuxInt) != 63 {
- break
- }
- x := z.Args[0]
- if !(z.Uses == 1) {
- break
- }
- v.reset(OpPPC64SRDconst)
- v.AuxInt = int64ToAuxInt(63)
- v.AddArg(x)
- return true
- }
return false
}
func rewriteValuePPC64_OpSelect1(v *Value) bool {
v.copyOf(x)
return true
}
- // match: (Select1 (ANDCCconst [0] _))
- // result: (FlagEQ)
- for {
- if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v.reset(OpPPC64FlagEQ)
- return true
- }
return false
}
func rewriteValuePPC64_OpSelectN(v *Value) bool {
b.resetWithControl(BlockPPC64EQ, cmp)
return true
}
- // match: (EQ (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (EQ (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64EQ, v0)
- return true
- }
- // match: (EQ (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (EQ (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64EQ, v0)
- return true
- }
// match: (EQ (CMPconst [0] z:(AND x y)) yes no)
// cond: z.Uses == 1
// result: (EQ (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
b.resetWithControl(BlockPPC64LE, cmp)
return true
}
- // match: (GE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (GE (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64GE, v0)
- return true
- }
- // match: (GE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (GE (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64GE, v0)
- return true
- }
// match: (GE (CMPconst [0] z:(AND x y)) yes no)
// cond: z.Uses == 1
// result: (GE (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
b.resetWithControl(BlockPPC64LT, cmp)
return true
}
- // match: (GT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (GT (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64GT, v0)
- return true
- }
- // match: (GT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (GT (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64GT, v0)
- return true
- }
// match: (GT (CMPconst [0] z:(AND x y)) yes no)
// cond: z.Uses == 1
// result: (GT (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
return true
}
// match: (If cond yes no)
- // result: (NE (Select1 <types.TypeFlags> (ANDCCconst [1] cond)) yes no)
+ // result: (NE (CMPconst [0] (ANDconst [1] cond)) yes no)
for {
cond := b.Controls[0]
- v0 := b.NewValue0(cond.Pos, OpSelect1, types.TypeFlags)
- v1 := b.NewValue0(cond.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0 := b.NewValue0(cond.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(cond.Pos, OpPPC64ANDconst, typ.Int)
v1.AuxInt = int64ToAuxInt(1)
v1.AddArg(cond)
v0.AddArg(v1)
b.resetWithControl(BlockPPC64GE, cmp)
return true
}
- // match: (LE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (LE (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64LE, v0)
- return true
- }
- // match: (LE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (LE (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64LE, v0)
- return true
- }
// match: (LE (CMPconst [0] z:(AND x y)) yes no)
// cond: z.Uses == 1
// result: (LE (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
b.resetWithControl(BlockPPC64GT, cmp)
return true
}
- // match: (LT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (LT (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64LT, v0)
- return true
- }
- // match: (LT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (LT (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64LT, v0)
- return true
- }
// match: (LT (CMPconst [0] z:(AND x y)) yes no)
// cond: z.Uses == 1
// result: (LT (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
break
}
case BlockPPC64NE:
- // match: (NE (Select1 (ANDCCconst [1] (Equal cc))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (Equal cc))) yes no)
// result: (EQ cc yes no)
- for b.Controls[0].Op == OpSelect1 {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
b.resetWithControl(BlockPPC64EQ, cc)
return true
}
- // match: (NE (Select1 (ANDCCconst [1] (NotEqual cc))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (NotEqual cc))) yes no)
// result: (NE cc yes no)
- for b.Controls[0].Op == OpSelect1 {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
b.resetWithControl(BlockPPC64NE, cc)
return true
}
- // match: (NE (Select1 (ANDCCconst [1] (LessThan cc))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (LessThan cc))) yes no)
// result: (LT cc yes no)
- for b.Controls[0].Op == OpSelect1 {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
b.resetWithControl(BlockPPC64LT, cc)
return true
}
- // match: (NE (Select1 (ANDCCconst [1] (LessEqual cc))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (LessEqual cc))) yes no)
// result: (LE cc yes no)
- for b.Controls[0].Op == OpSelect1 {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
b.resetWithControl(BlockPPC64LE, cc)
return true
}
- // match: (NE (Select1 (ANDCCconst [1] (GreaterThan cc))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (GreaterThan cc))) yes no)
// result: (GT cc yes no)
- for b.Controls[0].Op == OpSelect1 {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
b.resetWithControl(BlockPPC64GT, cc)
return true
}
- // match: (NE (Select1 (ANDCCconst [1] (GreaterEqual cc))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (GreaterEqual cc))) yes no)
// result: (GE cc yes no)
- for b.Controls[0].Op == OpSelect1 {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
b.resetWithControl(BlockPPC64GE, cc)
return true
}
- // match: (NE (Select1 (ANDCCconst [1] (FLessThan cc))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (FLessThan cc))) yes no)
// result: (FLT cc yes no)
- for b.Controls[0].Op == OpSelect1 {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
b.resetWithControl(BlockPPC64FLT, cc)
return true
}
- // match: (NE (Select1 (ANDCCconst [1] (FLessEqual cc))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (FLessEqual cc))) yes no)
// result: (FLE cc yes no)
- for b.Controls[0].Op == OpSelect1 {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
b.resetWithControl(BlockPPC64FLE, cc)
return true
}
- // match: (NE (Select1 (ANDCCconst [1] (FGreaterThan cc))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (FGreaterThan cc))) yes no)
// result: (FGT cc yes no)
- for b.Controls[0].Op == OpSelect1 {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
b.resetWithControl(BlockPPC64FGT, cc)
return true
}
- // match: (NE (Select1 (ANDCCconst [1] (FGreaterEqual cc))) yes no)
+ // match: (NE (CMPconst [0] (ANDconst [1] (FGreaterEqual cc))) yes no)
// result: (FGE cc yes no)
- for b.Controls[0].Op == OpSelect1 {
+ for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v_0_0_0 := v_0_0.Args[0]
b.resetWithControl(BlockPPC64NE, cmp)
return true
}
- // match: (NE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (NE (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPconst {
- v_0 := b.Controls[0]
- if auxIntToInt64(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64NE, v0)
- return true
- }
- // match: (NE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
- // result: (NE (Select1 <types.TypeFlags> z) yes no)
- for b.Controls[0].Op == OpPPC64CMPWconst {
- v_0 := b.Controls[0]
- if auxIntToInt32(v_0.AuxInt) != 0 {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpSelect0 {
- break
- }
- z := v_0_0.Args[0]
- if z.Op != OpPPC64ANDCCconst {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
- v0.AddArg(z)
- b.resetWithControl(BlockPPC64NE, v0)
- return true
- }
// match: (NE (CMPconst [0] z:(AND x y)) yes no)
// cond: z.Uses == 1
// result: (NE (Select1 <types.TypeFlags> (ANDCC x y)) yes no)