From: Paul E. Murphy
Date: Thu, 2 May 2024 20:08:30 +0000 (-0500)
Subject: cmd/compile/internal/ssa: reintroduce ANDconst opcode on PPC64
X-Git-Tag: go1.23rc1~203
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=dca577d882f989e41a753537e9607f0c22e4a798;p=gostls13.git

cmd/compile/internal/ssa: reintroduce ANDconst opcode on PPC64

This allows more effective conversion of rotate and mask opcodes into their CC equivalents, while simplifying the first lowering pass.

ANDconst was removed before the latelower pass was introduced to fold more cases of compare against zero. Reintroduce ANDconst to push its conversion to ANDCCconst into latelower alongside the other CC opcodes.

This also requires introducing RLDICLCC to prevent regressions when ANDconst is converted to RLDICL, then to RLDICLCC, and back to ANDCCconst where possible.

Change-Id: I9e5f9c99fbefa334db18c6c152c5f967f3ff2590
Reviewed-on: https://go-review.googlesource.com/c/go/+/586160
Reviewed-by: Lynn Boger
Reviewed-by: Dmitri Shuralyov
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Carlos Amedee
---
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index ac5149fb0a..367fd2f6b0 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -629,18 +629,18 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}}) // Auxint holds mask - case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICR: + case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICLCC, ssa.OpPPC64RLDICR: sh, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt) p := s.Prog(v.Op.Asm()) p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: sh} switch v.Op { - case ssa.OpPPC64RLDICL: + case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICLCC: p.AddRestSourceConst(mb) case ssa.OpPPC64RLDICR: p.AddRestSourceConst(me) } p.Reg = v.Args[0].Reg() - p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()} + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.ResultReg()} case ssa.OpPPC64RLWNM: _, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt) @@ -691,7 +691,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case ssa.OpPPC64ADDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst, ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, - ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst: + ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst, + ssa.OpPPC64ANDconst: p := s.Prog(v.Op.Asm()) p.Reg = v.Args[0].Reg() p.From.Type = obj.TYPE_CONST diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/src/cmd/compile/internal/ssa/_gen/PPC64.rules index 49d4f460e5..b515b537f5 100644 --- a/src/cmd/compile/internal/ssa/_gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/_gen/PPC64.rules @@ -137,22 +137,22 @@ (ROTL x (MOVDconst [c])) => (ROTLconst x [c&63]) // Combine rotate and mask operations -(Select0 (ANDCCconst [m] (ROTLWconst [r] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x) +(ANDconst [m] (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x) (AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x) -(Select0 (ANDCCconst [m] (ROTLW x r))) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r) +(ANDconst [m] (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM
[encodePPC64RotateMask(0,m,32)] x r) (AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r) // Note, any rotated word bitmask is still a valid word bitmask. (ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x) -(ROTLWconst [r] (Select0 (ANDCCconst [m] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x) +(ROTLWconst [r] (ANDconst [m] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x) -(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0]) -(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x) +(ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0]) +(ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x) (AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0]) (AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x) -(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0]) -(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x) +(SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0]) +(SRWconst (ANDconst [m] x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x) (SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0]) (SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x) @@ -201,38 +201,38 @@ ((Rsh64U|Lsh64)x64 x y) => (ISEL [0] (S(R|L)D x y) (MOVDconst [0]) (CMPUconst y [64])) ((Rsh64U|Lsh64)x32 x y) => (ISEL [0] (S(R|L)D x y) (MOVDconst [0]) (CMPWUconst y [64])) -((Rsh64U|Lsh64)x16 x y) => (ISEL [2] (S(R|L)D x y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFC0] y))) -((Rsh64U|Lsh64)x8 x y) => (ISEL [2] (S(R|L)D x y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00C0] y))) +((Rsh64U|Lsh64)x16 x y) => (ISEL [2] (S(R|L)D x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFC0] y))) +((Rsh64U|Lsh64)x8 x y) => (ISEL [2] (S(R|L)D x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00C0] y))) (Rsh64x(64|32) x y) => (ISEL [0] (SRAD x y) (SRADconst x [63]) (CMP(U|WU)const y [64])) -(Rsh64x16 x y) => (ISEL [2] (SRAD x y) (SRADconst x [63]) (Select1 (ANDCCconst [0xFFC0] y))) -(Rsh64x8 x y) => (ISEL [2] (SRAD x y) (SRADconst x [63]) (Select1 (ANDCCconst [0x00C0] y))) +(Rsh64x16 x y) => (ISEL [2] (SRAD x y) (SRADconst x [63]) (CMPconst [0] (ANDconst [0xFFC0] y))) +(Rsh64x8 x y) => (ISEL [2] (SRAD x y) (SRADconst x [63]) (CMPconst [0] (ANDconst [0x00C0] y))) ((Rsh32U|Lsh32)x64 x y) => (ISEL [0] (S(R|L)W x y) (MOVDconst [0]) (CMPUconst y [32])) ((Rsh32U|Lsh32)x32 x y) => (ISEL [0] (S(R|L)W x y) (MOVDconst [0]) (CMPWUconst y [32])) -((Rsh32U|Lsh32)x16 x y) => (ISEL [2] (S(R|L)W x y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFE0] y))) -((Rsh32U|Lsh32)x8 x y) => (ISEL [2] (S(R|L)W x y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00E0] y))) +((Rsh32U|Lsh32)x16 x y) => (ISEL [2] (S(R|L)W x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFE0] y))) +((Rsh32U|Lsh32)x8 x y) => (ISEL [2] (S(R|L)W x y) (MOVDconst [0]) (CMPconst 
[0] (ANDconst [0x00E0] y))) (Rsh32x(64|32) x y) => (ISEL [0] (SRAW x y) (SRAWconst x [31]) (CMP(U|WU)const y [32])) -(Rsh32x16 x y) => (ISEL [2] (SRAW x y) (SRAWconst x [31]) (Select1 (ANDCCconst [0xFFE0] y))) -(Rsh32x8 x y) => (ISEL [2] (SRAW x y) (SRAWconst x [31]) (Select1 (ANDCCconst [0x00E0] y))) +(Rsh32x16 x y) => (ISEL [2] (SRAW x y) (SRAWconst x [31]) (CMPconst [0] (ANDconst [0xFFE0] y))) +(Rsh32x8 x y) => (ISEL [2] (SRAW x y) (SRAWconst x [31]) (CMPconst [0] (ANDconst [0x00E0] y))) ((Rsh16U|Lsh16)x64 x y) => (ISEL [0] (S(R|L)D (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [16])) ((Rsh16U|Lsh16)x32 x y) => (ISEL [0] (S(R|L)D (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [16])) -((Rsh16U|Lsh16)x16 x y) => (ISEL [2] (S(R|L)D (MOVHZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFF0] y))) -((Rsh16U|Lsh16)x8 x y) => (ISEL [2] (S(R|L)D (MOVHZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00F0] y))) +((Rsh16U|Lsh16)x16 x y) => (ISEL [2] (S(R|L)D (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF0] y))) +((Rsh16U|Lsh16)x8 x y) => (ISEL [2] (S(R|L)D (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F0] y))) (Rsh16x(64|32) x y) => (ISEL [0] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (CMP(U|WU)const y [16])) -(Rsh16x16 x y) => (ISEL [2] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (Select1 (ANDCCconst [0xFFF0] y))) -(Rsh16x8 x y) => (ISEL [2] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (Select1 (ANDCCconst [0x00F0] y))) +(Rsh16x16 x y) => (ISEL [2] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0xFFF0] y))) +(Rsh16x8 x y) => (ISEL [2] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0x00F0] y))) ((Rsh8U|Lsh8)x64 x y) => (ISEL [0] (S(R|L)D (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [8])) ((Rsh8U|Lsh8)x32 x y) => (ISEL [0] (S(R|L)D (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [8])) -((Rsh8U|Lsh8)x16 x y) => (ISEL [2] (S(R|L)D (MOVBZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFF8] y))) -((Rsh8U|Lsh8)x8 x y) => (ISEL [2] (S(R|L)D (MOVBZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00F8] y))) +((Rsh8U|Lsh8)x16 x y) => (ISEL [2] (S(R|L)D (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF8] y))) +((Rsh8U|Lsh8)x8 x y) => (ISEL [2] (S(R|L)D (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F8] y))) (Rsh8x(64|32) x y) => (ISEL [0] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (CMP(U|WU)const y [8])) -(Rsh8x16 x y) => (ISEL [2] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (Select1 (ANDCCconst [0xFFF8] y))) -(Rsh8x8 x y) => (ISEL [2] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (Select1 (ANDCCconst [0x00F8] y))) +(Rsh8x16 x y) => (ISEL [2] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0xFFF8] y))) +(Rsh8x8 x y) => (ISEL [2] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0x00F8] y))) // Catch bounded shifts in situations like foo< uint64(c) => (FlagLT) +(CMP(U|WU)const [d] (ANDconst z [c])) && uint64(d) > uint64(c) => (FlagLT) (ORN x (MOVDconst [-1])) => x @@ -282,7 +282,7 @@ (OR x (NOR y y)) => (ORN x y) // Lowering comparisons -(EqB x y) => (Select0 (ANDCCconst [1] (EQV x y))) +(EqB x y) => (ANDconst [1] (EQV x y)) // Sign extension dependence on operand sign sets up for sign/zero-extension elision later (Eq(8|16) x y) && x.Type.IsSigned() && y.Type.IsSigned() => (Equal (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y))) (Eq(8|16) x y) => (Equal (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y))) @@ 
-322,11 +322,11 @@ (If (FGreaterThan cc) yes no) => (FGT cc yes no) (If (FGreaterEqual cc) yes no) => (FGE cc yes no) -(If cond yes no) => (NE (Select1 (ANDCCconst [1] cond)) yes no) +(If cond yes no) => (NE (CMPconst [0] (ANDconst [1] cond)) yes no) // Absorb boolean tests into block -(NE (Select1 (ANDCCconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no) -(NE (Select1 (ANDCCconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no) +(NE (CMPconst [0] (ANDconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no) +(NE (CMPconst [0] (ANDconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no) // absorb flag constants into branches (EQ (FlagEQ) yes no) => (First yes no) @@ -408,8 +408,6 @@ // Elide compares of bit tests -((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 z) yes no) -((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 z) yes no) ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 (ANDCC x y)) yes no) ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 (ORCC x y)) yes no) ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 (XORCC x y)) yes no) @@ -417,9 +415,9 @@ (CondSelect x y (SETBC [a] cmp)) => (ISEL [a] x y cmp) (CondSelect x y (SETBCR [a] cmp)) => (ISEL [a+4] x y cmp) // Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably. -(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (Select1 (ANDCCconst [1] bool))) +(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (CMPconst [0] (ANDconst [1] bool))) // Fold any CR -> GPR -> CR transfers when applying the above rule. 
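Review note, not part of the patch: the ISEL rules just below perform that folding. As a hedged illustration (the function name is mine, and whether the branch becomes a CondSelect depends on the branch-elimination pass), this is the shape of Go source these boolean-test rules handle:

package demo

// pick's condition lowers through (If cond ...) => (NE (CMPconst [0]
// (ANDconst [1] cond))) as in the rules above; because cond is a
// comparison, the absorb/fold rules let the branch (or the ISEL, if
// this becomes a CondSelect) consume the comparison's CR bit directly
// instead of a rematerialized boolean.
func pick(a, b, x int64) int64 {
	if x < 0 {
		return a
	}
	return b
}

The rewrites that follow remove exactly that CR -> GPR -> CR round trip: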
-(ISEL [6] x y (Select1 (ANDCCconst [1] (SETBC [c] cmp)))) => (ISEL [c] x y cmp) +(ISEL [6] x y (CMPconst [0] (ANDconst [1] (SETBC [c] cmp)))) => (ISEL [c] x y cmp) (ISEL [6] x y ((CMP|CMPW)const [0] (SETBC [c] cmp))) => (ISEL [c] x y cmp) (ISEL [6] x y ((CMP|CMPW)const [0] (SETBCR [c] cmp))) => (ISEL [c+4] x y cmp) @@ -563,45 +561,44 @@ // Discover consts (AND x (MOVDconst [-1])) => x -(AND x (MOVDconst [c])) && isU16Bit(c) => (Select0 (ANDCCconst [c] x)) +(AND x (MOVDconst [c])) && isU16Bit(c) => (ANDconst [c] x) (XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x) (OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x) // Simplify consts -(ANDCCconst [c] (Select0 (ANDCCconst [d] x))) => (ANDCCconst [c&d] x) +(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) (ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x) (XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x) -(Select0 (ANDCCconst [-1] x)) => x -(Select0 (ANDCCconst [0] _)) => (MOVDconst [0]) -(Select1 (ANDCCconst [0] _)) => (FlagEQ) +(ANDconst [-1] x) => x +(ANDconst [0] _) => (MOVDconst [0]) (XORconst [0] x) => x (ORconst [-1] _) => (MOVDconst [-1]) (ORconst [0] x) => x // zero-extend of small and => small and -(MOVBZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFF => y -(MOVHZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y -(MOVWZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFFFFFF => y +(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF => y +(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y +(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF => y (MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y // sign extend of small-positive and => small-positive-and -(MOVBreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7F => y -(MOVHreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7FFF => y -(MOVWreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y // 0xFFFF is largest immediate constant, when regarded as 32-bit is > 0 +(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F => y +(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF => y +(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y // 0xFFFF is largest immediate constant, when regarded as 32-bit is > 0 (MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y // small and of zero-extend => either zero-extend or small and -(Select0 (ANDCCconst [c] y:(MOVBZreg _))) && c&0xFF == 0xFF => y -(Select0 (ANDCCconst [0xFF] (MOVBreg x))) => (MOVBZreg x) -(Select0 (ANDCCconst [c] y:(MOVHZreg _))) && c&0xFFFF == 0xFFFF => y -(Select0 (ANDCCconst [0xFFFF] (MOVHreg x))) => (MOVHZreg x) +(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF => y +(ANDconst [0xFF] (MOVBreg x)) => (MOVBZreg x) +(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF => y +(ANDconst [0xFFFF] (MOVHreg x)) => (MOVHZreg x) (AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x) // normal case -(Select0 (ANDCCconst [c] (MOVBZreg x))) => (Select0 (ANDCCconst [c&0xFF] x)) -(Select0 (ANDCCconst [c] (MOVHZreg x))) => (Select0 (ANDCCconst [c&0xFFFF] x)) -(Select0 (ANDCCconst [c] (MOVWZreg x))) => (Select0 (ANDCCconst [c&0xFFFFFFFF] x)) +(ANDconst [c] (MOVBZreg x)) => (ANDconst [c&0xFF] x) +(ANDconst [c] (MOVHZreg x)) => (ANDconst [c&0xFFFF] x) +(ANDconst [c] (MOVWZreg x)) => (ANDconst [c&0xFFFFFFFF] x) // Eliminate unnecessary sign/zero extend following right shift (MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x)) @@ -650,10 
+647,10 @@ (MOVBZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFF,r)] y) (MOVHZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFFFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFFFF,r)] y) (MOVWZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFFFFFFFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFFFFFFFF,r)] y) -(Select0 (ANDCCconst [m] (RLWINM [r] y))) && mergePPC64AndRlwinm(uint32(m),r) != 0 => (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y) +(ANDconst [m] (RLWINM [r] y)) && mergePPC64AndRlwinm(uint32(m),r) != 0 => (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y) (SLDconst [s] (RLWINM [r] y)) && mergePPC64SldiRlwinm(s,r) != 0 => (RLWINM [mergePPC64SldiRlwinm(s,r)] y) (RLWINM [r] (MOVHZreg u)) && mergePPC64RlwinmAnd(r,0xFFFF) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,0xFFFF)] u) -(RLWINM [r] (Select0 (ANDCCconst [a] u))) && mergePPC64RlwinmAnd(r,uint32(a)) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u) +(RLWINM [r] (ANDconst [a] u)) && mergePPC64RlwinmAnd(r,uint32(a)) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u) // SLWconst is a special case of RLWNM which always zero-extends the result. (SLWconst [s] (MOVWZreg w)) => (SLWconst [s] w) (MOVWZreg w:(SLWconst u)) => w @@ -682,10 +679,10 @@ (MOVBZreg ((OR|XOR|AND) x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) x y)) (MOVBZreg ((OR|XOR|AND) x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) x y)) -(MOV(B|H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) => z +(MOV(B|H|W)Zreg z:(ANDconst [c] (MOVBZload ptr x))) => z (MOV(B|H|W)Zreg z:(AND y (MOV(B|H|W)Zload ptr x))) => z -(MOV(H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) => z -(MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x)))) => z +(MOV(H|W)Zreg z:(ANDconst [c] (MOVHZload ptr x))) => z +(MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) => z // Arithmetic constant ops @@ -818,7 +815,7 @@ (AtomicOr(8|32) ...) => (LoweredAtomicOr(8|32) ...) (Slicemask x) => (SRADconst (NEG x) [63]) -(Select0 (ANDCCconst [1] z:(SRADconst [63] x))) && z.Uses == 1 => (SRDconst [63] x) +(ANDconst [1] z:(SRADconst [63] x)) && z.Uses == 1 => (SRDconst [63] x) // Note that MOV??reg returns a 64-bit int, x is not necessarily that wide // This may interact with other patterns in the future. 
(Compare with arm64) @@ -854,11 +851,11 @@ (SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x) (SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x) -(SLDconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) +(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) (SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) (SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x) (SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x) -(SLWconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) +(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) (SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) // special case for power9 (SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x) @@ -894,8 +891,9 @@ // Canonicalize the order of arguments to comparisons - helps with CSE. ((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x)) -// n is always a zero-extended uint16 value, so n & z is always a non-negative 32 or 64 bit value. Use the flag result of ANDCCconst. -((CMP|CMPW|CMPU|CMPWU)const [0] (Select0 a:(ANDCCconst [n] z))) => (Select1 a) +// n is always a zero-extended uint16 value, so n & z is always a non-negative 32 or 64 bit value. +// Rewrite to a cmp int64(0) to lower into ANDCCconst in the latelower pass. +(CMP(W|U|WU)const [0] a:(ANDconst [n] z)) => (CMPconst [0] a) // SETBC auxInt values 0=LT 1=GT 2=EQ Crbit==1 ? 1 : 0 // SETBCR auxInt values 0=LT 1=GT 2=EQ Crbit==1 ? 
0 : 1 @@ -962,8 +960,8 @@ (XORconst [1] (SETBCR [n] cmp)) => (SETBC [n] cmp) (XORconst [1] (SETBC [n] cmp)) => (SETBCR [n] cmp) -(SETBC [2] (Select1 a:(ANDCCconst [1] _))) => (XORconst [1] (Select0 a)) -(SETBCR [2] (Select1 a:(ANDCCconst [1] _))) => (Select0 a) +(SETBC [2] (CMPconst [0] a:(ANDconst [1] _))) => (XORconst [1] a) +(SETBCR [2] (CMPconst [0] a:(ANDconst [1] _))) => a // Only CMPconst for these in case AND|OR|XOR result is > 32 bits (SETBC [2] (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (SETBC [2] (Select1 (ANDCC y z ))) @@ -976,7 +974,7 @@ (SETBCR [2] (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (SETBCR [2] (Select1 (XORCC y z ))) // A particular pattern seen in cgo code: -(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (Select0 (ANDCCconst [c&0xFF] x)) +(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x) // floating point negative abs (FNEG (F(ABS|NABS) x)) => (F(NABS|ABS) x) diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go index 7f0ee9ab91..799881a8cd 100644 --- a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go @@ -248,11 +248,12 @@ func init() { {name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits {name: "EXTSWSLconst", argLength: 1, reg: gp11, asm: "EXTSWSLI", aux: "Int64"}, - {name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux - {name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux - {name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above - {name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int64"}, // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63. - {name: "RLDICR", argLength: 1, reg: gp11, asm: "RLDICR", aux: "Int64"}, // Likewise, but only ME and SH are valid. MB is always 0. + {name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux + {name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux + {name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above + {name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int64"}, // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63. + {name: "RLDICLCC", argLength: 1, reg: gp11, asm: "RLDICLCC", aux: "Int64", typ: "(Int, Flags)"}, // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63. Sets CC. + {name: "RLDICR", argLength: 1, reg: gp11, asm: "RLDICR", aux: "Int64"}, // Likewise, but only ME and SH are valid. MB is always 0. 
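Review note, not part of the patch: a standalone sketch of the rotate-and-mask semantics the RLDICL/RLDICLCC/RLDICR entries above encode, assuming PowerPC's MSB-is-bit-0 numbering (the function name is illustrative):

package demo

import "math/bits"

// rldicl models the instruction behind RLDICL and RLDICLCC: rotate
// left by sh, then keep bits mb..63. ME is fixed at 63 for this form,
// so the mask is a run of ones reaching the least significant bit;
// RLDICLCC additionally sets CR0 from the result.
func rldicl(x uint64, sh, mb uint) uint64 {
	rot := bits.RotateLeft64(x, int(sh))
	return rot & (^uint64(0) >> mb) // ones in bits mb..63
}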
{name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD"}, // count leading zeros {name: "CNTLZDCC", argLength: 1, reg: gp11, asm: "CNTLZDCC", typ: "(Int, Flags)"}, // count leading zeros, sets CC @@ -323,9 +324,10 @@ func init() { {name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"}, // -abs(arg0), float64 {name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64 - {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux - {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux - {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always. + {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux + {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux + {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always. + {name: "ANDconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, clobberFlags: true, asm: "ANDCC", aux: "Int64", typ: "Int"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always. {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64 {name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64 diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules b/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules index 2eecf94300..7aa8f41e78 100644 --- a/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules +++ b/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules @@ -18,11 +18,8 @@ (SETBC [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [1] (MOVDconst [1]) cmp) (SETBCR [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [5] (MOVDconst [1]) cmp) -// Avoid using ANDCCconst if the value for CR0 is not needed, since ANDCCconst -// always sets it. -(Select0 z:(ANDCCconst [m] x)) && z.Uses == 1 && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] x) // The upper bits of the smaller than register values is undefined. Take advantage of that. -(AND x:(MOVDconst [m]) n) && t.Size() <= 2 => (Select0 (ANDCCconst [int64(int16(m))] n)) +(AND x:(MOVDconst [m]) n) && t.Size() <= 2 => (ANDconst [int64(int16(m))] n) // Convert simple bit masks to an equivalent rldic[lr] if possible. (AND x:(MOVDconst [m]) n) && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] n) @@ -47,9 +44,17 @@ // Note: to minimize potentially expensive regeneration of CC opcodes during the flagalloc pass, only rewrite if // both ops are in the same block. (CMPconst [0] z:((ADD|AND|ANDN|OR|SUB|NOR|XOR) x y)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z)) -(CMPconst [0] z:((NEG|CNTLZD) x)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z)) +(CMPconst [0] z:((NEG|CNTLZD|RLDICL) x)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z)) // Note: ADDCCconst only assembles to 1 instruction for int16 constants. (CMPconst [0] z:(ADDconst [c] x)) && int64(int16(c)) == c && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z)) +(CMPconst [0] z:(ANDconst [c] x)) && int64(uint16(c)) == c && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z)) // And finally, fixup the flag user. 
(CMPconst [0] (Select0 z:((ADD|AND|ANDN|OR|SUB|NOR|XOR)CC x y))) => (Select1 z) -(CMPconst [0] (Select0 z:((ADDCCconst|NEGCC|CNTLZDCC) y))) => (Select1 z) +(CMPconst [0] (Select0 z:((ADDCCconst|ANDCCconst|NEGCC|CNTLZDCC|RLDICLCC) y))) => (Select1 z) + +// After trying to convert ANDconst to ANDCCconst above, if the CC result is not needed, try to avoid using +// ANDconst, which clobbers CC. +(ANDconst [m] x) && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] x) + +// Likewise, try converting RLDICLCC back to ANDCCconst, as it is faster. +(RLDICLCC [a] x) && convertPPC64RldiclAndccconst(a) != 0 => (ANDCCconst [convertPPC64RldiclAndccconst(a)] x) diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 429c214395..b07d515bd1 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2165,6 +2165,7 @@ const ( OpPPC64RLWNM OpPPC64RLWMI OpPPC64RLDICL + OpPPC64RLDICLCC OpPPC64RLDICR OpPPC64CNTLZD OpPPC64CNTLZDCC @@ -2221,6 +2222,7 @@ const ( OpPPC64ORconst OpPPC64XORconst OpPPC64ANDCCconst + OpPPC64ANDconst OpPPC64MOVBreg OpPPC64MOVBZreg OpPPC64MOVHreg @@ -29121,6 +29123,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "RLDICLCC", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ARLDICLCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "RLDICR", auxType: auxInt64, @@ -29885,6 +29901,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + asm: ppc64.AANDCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "MOVBreg", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index c3007781d6..f90e65f492 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -1745,9 +1745,11 @@ func convertPPC64OpToOpCC(op *Value) *Value { OpPPC64ADD: OpPPC64ADDCC, OpPPC64ADDconst: OpPPC64ADDCCconst, OpPPC64AND: OpPPC64ANDCC, + OpPPC64ANDconst: OpPPC64ANDCCconst, OpPPC64ANDN: OpPPC64ANDNCC, OpPPC64CNTLZD: OpPPC64CNTLZDCC, OpPPC64OR: OpPPC64ORCC, + OpPPC64RLDICL: OpPPC64RLDICLCC, OpPPC64SUB: OpPPC64SUBCC, OpPPC64NEG: OpPPC64NEGCC, OpPPC64NOR: OpPPC64NORCC, @@ -1761,6 +1763,15 @@ func convertPPC64OpToOpCC(op *Value) *Value { return op } +// Try converting a RLDICL to ANDCC. If successful, return the mask; otherwise, return 0. +func convertPPC64RldiclAndccconst(sauxint int64) int64 { + r, _, _, mask := DecodePPC64RotateMask(sauxint) + if r != 0 || mask&0xFFFF != mask { + return 0 + } + return int64(mask) +} +
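Review note, not part of the patch: a hedged restatement of the helper above with the packed-auxint decoding factored out (rldiclToAndImm is an illustrative name, not part of the change). An RLDICLCC can be turned back into the faster ANDCCconst only when it performs no rotation and its mask fits the 16-bit unsigned andi. immediate:

package demo

// rldiclToAndImm mirrors the check in convertPPC64RldiclAndccconst,
// taking the already-decoded rotate amount and mask instead of the
// packed auxint the real helper decodes.
func rldiclToAndImm(r int64, mask uint64) int64 {
	if r != 0 || mask&0xFFFF != mask {
		return 0 // not expressible as an andi. immediate
	}
	return int64(mask)
}

// Convenience function to rotate a 32 bit constant value by another constant.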
func rotateLeft32(v, rotate int64) int64 { return int64(bits.RotateLeft32(uint32(v), int(rotate))) diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 6e39ee5576..209f029e33 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -449,10 +449,10 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64ADDconst(v) case OpPPC64AND: return rewriteValuePPC64_OpPPC64AND(v) - case OpPPC64ANDCCconst: - return rewriteValuePPC64_OpPPC64ANDCCconst(v) case OpPPC64ANDN: return rewriteValuePPC64_OpPPC64ANDN(v) + case OpPPC64ANDconst: + return rewriteValuePPC64_OpPPC64ANDconst(v) case OpPPC64BRD: return rewriteValuePPC64_OpPPC64BRD(v) case OpPPC64BRH: @@ -1397,7 +1397,7 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool { } // match: (CondSelect x y bool) // cond: flagArg(bool) == nil - // result: (ISEL [6] x y (Select1 (ANDCCconst [1] bool))) + // result: (ISEL [6] x y (CMPconst [0] (ANDconst [1] bool))) for { x := v_0 y := v_1 @@ -1407,8 +1407,9 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool { } v.reset(OpPPC64ISEL) v.AuxInt = int32ToAuxInt(6) - v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v1.AuxInt = int64ToAuxInt(1) v1.AddArg(bool) v0.AddArg(v1) @@ -1987,17 +1988,14 @@ func rewriteValuePPC64_OpEqB(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqB x y) - // result: (Select0 (ANDCCconst [1] (EQV x y))) + // result: (ANDconst [1] (EQV x y)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v.Type = typ.Int - v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v0.AuxInt = int64ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64) - v1.AddArg2(x, y) - v0.AddArg(v1) + v.reset(OpPPC64ANDconst) + v.AuxInt = int64ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64) + v0.AddArg2(x, y) v.AddArg(v0) return true } @@ -2633,7 +2631,7 @@ func rewriteValuePPC64_OpLsh16x16(v *Value) bool { return true } // match: (Lsh16x16 x y) - // result: (ISEL [2] (SLD (MOVHZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFF0] y))) + // result: (ISEL [2] (SLD (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF0] y))) for { t := v.Type x := v_0 @@ -2646,8 +2644,9 @@ func rewriteValuePPC64_OpLsh16x16(v *Value) bool { v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v2.AuxInt = int64ToAuxInt(0) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v4.AuxInt = int64ToAuxInt(0xFFF0) v4.AddArg(y) v3.AddArg(v4) @@ -2769,7 +2768,7 @@ func rewriteValuePPC64_OpLsh16x8(v *Value) bool { return true } // match: (Lsh16x8 x y) - // result: (ISEL [2] (SLD (MOVHZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00F0] y))) + // result: (ISEL [2] (SLD (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F0] y))) for { t := v.Type x := v_0 @@ -2782,8 +2781,9 @@ func rewriteValuePPC64_OpLsh16x8(v *Value) bool { v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v2.AuxInt = int64ToAuxInt(0) - v3 := 
b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v4.AuxInt = int64ToAuxInt(0x00F0) v4.AddArg(y) v3.AddArg(v4) @@ -2810,7 +2810,7 @@ func rewriteValuePPC64_OpLsh32x16(v *Value) bool { return true } // match: (Lsh32x16 x y) - // result: (ISEL [2] (SLW x y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFE0] y))) + // result: (ISEL [2] (SLW x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFE0] y))) for { t := v.Type x := v_0 @@ -2821,8 +2821,9 @@ func rewriteValuePPC64_OpLsh32x16(v *Value) bool { v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(0) - v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v3.AuxInt = int64ToAuxInt(0xFFE0) v3.AddArg(y) v2.AddArg(v3) @@ -2940,7 +2941,7 @@ func rewriteValuePPC64_OpLsh32x8(v *Value) bool { return true } // match: (Lsh32x8 x y) - // result: (ISEL [2] (SLW x y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00E0] y))) + // result: (ISEL [2] (SLW x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00E0] y))) for { t := v.Type x := v_0 @@ -2951,8 +2952,9 @@ func rewriteValuePPC64_OpLsh32x8(v *Value) bool { v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(0) - v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v3.AuxInt = int64ToAuxInt(0x00E0) v3.AddArg(y) v2.AddArg(v3) @@ -2979,7 +2981,7 @@ func rewriteValuePPC64_OpLsh64x16(v *Value) bool { return true } // match: (Lsh64x16 x y) - // result: (ISEL [2] (SLD x y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFC0] y))) + // result: (ISEL [2] (SLD x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFC0] y))) for { t := v.Type x := v_0 @@ -2990,8 +2992,9 @@ func rewriteValuePPC64_OpLsh64x16(v *Value) bool { v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(0) - v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v3.AuxInt = int64ToAuxInt(0xFFC0) v3.AddArg(y) v2.AddArg(v3) @@ -3109,7 +3112,7 @@ func rewriteValuePPC64_OpLsh64x8(v *Value) bool { return true } // match: (Lsh64x8 x y) - // result: (ISEL [2] (SLD x y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00C0] y))) + // result: (ISEL [2] (SLD x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00C0] y))) for { t := v.Type x := v_0 @@ -3120,8 +3123,9 @@ func rewriteValuePPC64_OpLsh64x8(v *Value) bool { v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(0) - v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(0) + v3 
:= b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v3.AuxInt = int64ToAuxInt(0x00C0) v3.AddArg(y) v2.AddArg(v3) @@ -3148,7 +3152,7 @@ func rewriteValuePPC64_OpLsh8x16(v *Value) bool { return true } // match: (Lsh8x16 x y) - // result: (ISEL [2] (SLD (MOVBZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFF8] y))) + // result: (ISEL [2] (SLD (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF8] y))) for { t := v.Type x := v_0 @@ -3161,8 +3165,9 @@ func rewriteValuePPC64_OpLsh8x16(v *Value) bool { v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v2.AuxInt = int64ToAuxInt(0) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v4.AuxInt = int64ToAuxInt(0xFFF8) v4.AddArg(y) v3.AddArg(v4) @@ -3284,7 +3289,7 @@ func rewriteValuePPC64_OpLsh8x8(v *Value) bool { return true } // match: (Lsh8x8 x y) - // result: (ISEL [2] (SLD (MOVBZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00F8] y))) + // result: (ISEL [2] (SLD (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F8] y))) for { t := v.Type x := v_0 @@ -3297,8 +3302,9 @@ func rewriteValuePPC64_OpLsh8x8(v *Value) bool { v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v2.AuxInt = int64ToAuxInt(0) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v4.AuxInt = int64ToAuxInt(0x00F8) v4.AddArg(y) v3.AddArg(v4) @@ -4189,8 +4195,6 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool { func rewriteValuePPC64_OpPPC64AND(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (AND (MOVDconst [m]) (ROTLWconst [r] x)) // cond: isPPC64WordRotateMask(m) // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x) @@ -4336,7 +4340,7 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { } // match: (AND x (MOVDconst [c])) // cond: isU16Bit(c) - // result: (Select0 (ANDCCconst [c] x)) + // result: (ANDconst [c] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 @@ -4347,11 +4351,9 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { if !(isU16Bit(c)) { continue } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpPPC64ANDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) return true } break @@ -4393,7 +4395,7 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { break } // match: (AND (MOVDconst [c]) x:(MOVBZload _ _)) - // result: (Select0 (ANDCCconst [c&0xFF] x)) + // result: (ANDconst [c&0xFF] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64MOVDconst { @@ -4404,55 +4406,262 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { if x.Op != OpPPC64MOVBZload { continue } - v.reset(OpSelect0) - v0 := b.NewValue0(x.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v0.AuxInt = int64ToAuxInt(c & 0xFF) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpPPC64ANDconst) + v.AuxInt = int64ToAuxInt(c & 0xFF) + v.AddArg(x) return true } break } return false } -func 
rewriteValuePPC64_OpPPC64ANDCCconst(v *Value) bool { +func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ANDCCconst [c] (Select0 (ANDCCconst [d] x))) - // result: (ANDCCconst [c&d] x) + // match: (ANDN (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c&^d]) for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpSelect0 { + if v_0.Op != OpPPC64MOVDconst { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst { + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpPPC64MOVDconst { break } - d := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v.reset(OpPPC64ANDCCconst) - v.AuxInt = int64ToAuxInt(c & d) - v.AddArg(x) + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(c &^ d) return true } return false } -func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool { - v_1 := v.Args[1] +func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { v_0 := v.Args[0] - // match: (ANDN (MOVDconst [c]) (MOVDconst [d])) - // result: (MOVDconst [c&^d]) + // match: (ANDconst [m] (ROTLWconst [r] x)) + // cond: isPPC64WordRotateMask(m) + // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x) for { - if v_0.Op != OpPPC64MOVDconst { + m := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64ROTLWconst { break } - c := auxIntToInt64(v_0.AuxInt) - if v_1.Op != OpPPC64MOVDconst { + r := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(isPPC64WordRotateMask(m)) { + break + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32)) + v.AddArg(x) + return true + } + // match: (ANDconst [m] (ROTLW x r)) + // cond: isPPC64WordRotateMask(m) + // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r) + for { + m := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64ROTLW { + break + } + r := v_0.Args[1] + x := v_0.Args[0] + if !(isPPC64WordRotateMask(m)) { + break + } + v.reset(OpPPC64RLWNM) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32)) + v.AddArg2(x, r) + return true + } + // match: (ANDconst [m] (SRWconst x [s])) + // cond: mergePPC64RShiftMask(m,s,32) == 0 + // result: (MOVDconst [0]) + for { + m := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64SRWconst { + break + } + s := auxIntToInt64(v_0.AuxInt) + if !(mergePPC64RShiftMask(m, s, 32) == 0) { break } - d := auxIntToInt64(v_1.AuxInt) v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(c &^ d) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (ANDconst [m] (SRWconst x [s])) + // cond: mergePPC64AndSrwi(m,s) != 0 + // result: (RLWINM [mergePPC64AndSrwi(m,s)] x) + for { + m := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64SRWconst { + break + } + s := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(mergePPC64AndSrwi(m, s) != 0) { + break + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s)) + v.AddArg(x) + return true + } + // match: (ANDconst [c] (ANDconst [d] x)) + // result: (ANDconst [c&d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64ANDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpPPC64ANDconst) + v.AuxInt = int64ToAuxInt(c & d) + v.AddArg(x) + return true + } + // match: (ANDconst [-1] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ANDconst [0] _) + // result: (MOVDconst [0]) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (ANDconst [c] y:(MOVBZreg _)) + // cond: 
c&0xFF == 0xFF + // result: y + for { + c := auxIntToInt64(v.AuxInt) + y := v_0 + if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) { + break + } + v.copyOf(y) + return true + } + // match: (ANDconst [0xFF] (MOVBreg x)) + // result: (MOVBZreg x) + for { + if auxIntToInt64(v.AuxInt) != 0xFF || v_0.Op != OpPPC64MOVBreg { + break + } + x := v_0.Args[0] + v.reset(OpPPC64MOVBZreg) + v.AddArg(x) + return true + } + // match: (ANDconst [c] y:(MOVHZreg _)) + // cond: c&0xFFFF == 0xFFFF + // result: y + for { + c := auxIntToInt64(v.AuxInt) + y := v_0 + if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) { + break + } + v.copyOf(y) + return true + } + // match: (ANDconst [0xFFFF] (MOVHreg x)) + // result: (MOVHZreg x) + for { + if auxIntToInt64(v.AuxInt) != 0xFFFF || v_0.Op != OpPPC64MOVHreg { + break + } + x := v_0.Args[0] + v.reset(OpPPC64MOVHZreg) + v.AddArg(x) + return true + } + // match: (ANDconst [c] (MOVBZreg x)) + // result: (ANDconst [c&0xFF] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64MOVBZreg { + break + } + x := v_0.Args[0] + v.reset(OpPPC64ANDconst) + v.AuxInt = int64ToAuxInt(c & 0xFF) + v.AddArg(x) + return true + } + // match: (ANDconst [c] (MOVHZreg x)) + // result: (ANDconst [c&0xFFFF] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64MOVHZreg { + break + } + x := v_0.Args[0] + v.reset(OpPPC64ANDconst) + v.AuxInt = int64ToAuxInt(c & 0xFFFF) + v.AddArg(x) + return true + } + // match: (ANDconst [c] (MOVWZreg x)) + // result: (ANDconst [c&0xFFFFFFFF] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64MOVWZreg { + break + } + x := v_0.Args[0] + v.reset(OpPPC64ANDconst) + v.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF) + v.AddArg(x) + return true + } + // match: (ANDconst [m] (RLWINM [r] y)) + // cond: mergePPC64AndRlwinm(uint32(m),r) != 0 + // result: (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y) + for { + m := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64RLWINM { + break + } + r := auxIntToInt64(v_0.AuxInt) + y := v_0.Args[0] + if !(mergePPC64AndRlwinm(uint32(m), r) != 0) { + break + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(mergePPC64AndRlwinm(uint32(m), r)) + v.AddArg(y) + return true + } + // match: (ANDconst [1] z:(SRADconst [63] x)) + // cond: z.Uses == 1 + // result: (SRDconst [63] x) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + z := v_0 + if z.Op != OpPPC64SRADconst || auxIntToInt64(z.AuxInt) != 63 { + break + } + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpPPC64SRDconst) + v.AuxInt = int64ToAuxInt(63) + v.AddArg(x) return true } return false @@ -4785,19 +4994,15 @@ func rewriteValuePPC64_OpPPC64CMPU(v *Value) bool { } func rewriteValuePPC64_OpPPC64CMPUconst(v *Value) bool { v_0 := v.Args[0] - // match: (CMPUconst [d] (Select0 (ANDCCconst z [c]))) + // match: (CMPUconst [d] (ANDconst z [c])) // cond: uint64(d) > uint64(c) // result: (FlagLT) for { d := auxIntToInt64(v.AuxInt) - if v_0.Op != OpSelect0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst { + if v_0.Op != OpPPC64ANDconst { break } - c := auxIntToInt64(v_0_0.AuxInt) + c := auxIntToInt64(v_0.AuxInt) if !(uint64(d) > uint64(c)) { break } @@ -4849,18 +5054,18 @@ func rewriteValuePPC64_OpPPC64CMPUconst(v *Value) bool { v.reset(OpPPC64FlagGT) return true } - // match: (CMPUconst [0] (Select0 a:(ANDCCconst [n] z))) - // result: (Select1 a) + // match: (CMPUconst [0] a:(ANDconst [n] z)) + // result: (CMPconst [0] a) for { - if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + if auxIntToInt64(v.AuxInt) != 0 { break } 
- a := v_0.Args[0] - if a.Op != OpPPC64ANDCCconst { + a := v_0 + if a.Op != OpPPC64ANDconst { break } - v.reset(OpSelect1) - v.Type = types.TypeFlags + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) v.AddArg(a) return true } @@ -5030,19 +5235,15 @@ func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool { } func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value) bool { v_0 := v.Args[0] - // match: (CMPWUconst [d] (Select0 (ANDCCconst z [c]))) + // match: (CMPWUconst [d] (ANDconst z [c])) // cond: uint64(d) > uint64(c) // result: (FlagLT) for { d := auxIntToInt32(v.AuxInt) - if v_0.Op != OpSelect0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst { + if v_0.Op != OpPPC64ANDconst { break } - c := auxIntToInt64(v_0_0.AuxInt) + c := auxIntToInt64(v_0.AuxInt) if !(uint64(d) > uint64(c)) { break } @@ -5094,18 +5295,18 @@ func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value) bool { v.reset(OpPPC64FlagGT) return true } - // match: (CMPWUconst [0] (Select0 a:(ANDCCconst [n] z))) - // result: (Select1 a) + // match: (CMPWUconst [0] a:(ANDconst [n] z)) + // result: (CMPconst [0] a) for { - if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + if auxIntToInt32(v.AuxInt) != 0 { break } - a := v_0.Args[0] - if a.Op != OpPPC64ANDCCconst { + a := v_0 + if a.Op != OpPPC64ANDconst { break } - v.reset(OpSelect1) - v.Type = types.TypeFlags + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) v.AddArg(a) return true } @@ -5158,18 +5359,18 @@ func rewriteValuePPC64_OpPPC64CMPWconst(v *Value) bool { v.reset(OpPPC64FlagGT) return true } - // match: (CMPWconst [0] (Select0 a:(ANDCCconst [n] z))) - // result: (Select1 a) + // match: (CMPWconst [0] a:(ANDconst [n] z)) + // result: (CMPconst [0] a) for { - if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + if auxIntToInt32(v.AuxInt) != 0 { break } - a := v_0.Args[0] - if a.Op != OpPPC64ANDCCconst { + a := v_0 + if a.Op != OpPPC64ANDconst { break } - v.reset(OpSelect1) - v.Type = types.TypeFlags + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) v.AddArg(a) return true } @@ -5222,21 +5423,6 @@ func rewriteValuePPC64_OpPPC64CMPconst(v *Value) bool { v.reset(OpPPC64FlagGT) return true } - // match: (CMPconst [0] (Select0 a:(ANDCCconst [n] z))) - // result: (Select1 a) - for { - if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { - break - } - a := v_0.Args[0] - if a.Op != OpPPC64ANDCCconst { - break - } - v.reset(OpSelect1) - v.Type = types.TypeFlags - v.AddArg(a) - return true - } return false } func rewriteValuePPC64_OpPPC64Equal(v *Value) bool { @@ -5921,7 +6107,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ISEL [6] x y (Select1 (ANDCCconst [1] (SETBC [c] cmp)))) + // match: (ISEL [6] x y (CMPconst [0] (ANDconst [1] (SETBC [c] cmp)))) // result: (ISEL [c] x y cmp) for { if auxIntToInt32(v.AuxInt) != 6 { @@ -5929,11 +6115,11 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { } x := v_0 y := v_1 - if v_2.Op != OpSelect1 { + if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 { break } v_2_0 := v_2.Args[0] - if v_2_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0.AuxInt) != 1 { + if v_2_0.Op != OpPPC64ANDconst || auxIntToInt64(v_2_0.AuxInt) != 1 { break } v_2_0_0 := v_2_0.Args[0] @@ -6591,19 +6777,15 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MOVBZreg y:(Select0 (ANDCCconst [c] _))) + // match: (MOVBZreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0xFF 
// result: y for { y := v_0 - if y.Op != OpSelect0 { + if y.Op != OpPPC64ANDconst { break } - y_0 := y.Args[0] - if y_0.Op != OpPPC64ANDCCconst { - break - } - c := auxIntToInt64(y_0.AuxInt) + c := auxIntToInt64(y.AuxInt) if !(uint64(c) <= 0xFF) { break } @@ -6951,19 +7133,15 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { } break } - // match: (MOVBZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) + // match: (MOVBZreg z:(ANDconst [c] (MOVBZload ptr x))) // result: z for { z := v_0 - if z.Op != OpSelect0 { + if z.Op != OpPPC64ANDconst { break } z_0 := z.Args[0] - if z_0.Op != OpPPC64ANDCCconst { - break - } - z_0_0 := z_0.Args[0] - if z_0_0.Op != OpPPC64MOVBZload { + if z_0.Op != OpPPC64MOVBZload { break } v.copyOf(z) @@ -7054,19 +7232,15 @@ func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MOVBreg y:(Select0 (ANDCCconst [c] _))) + // match: (MOVBreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0x7F // result: y for { y := v_0 - if y.Op != OpSelect0 { + if y.Op != OpPPC64ANDconst { break } - y_0 := y.Args[0] - if y_0.Op != OpPPC64ANDCCconst { - break - } - c := auxIntToInt64(y_0.AuxInt) + c := auxIntToInt64(y.AuxInt) if !(uint64(c) <= 0x7F) { break } @@ -8472,19 +8646,15 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MOVHZreg y:(Select0 (ANDCCconst [c] _))) + // match: (MOVHZreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0xFFFF // result: y for { y := v_0 - if y.Op != OpSelect0 { + if y.Op != OpPPC64ANDconst { break } - y_0 := y.Args[0] - if y_0.Op != OpPPC64ANDCCconst { - break - } - c := auxIntToInt64(y_0.AuxInt) + c := auxIntToInt64(y.AuxInt) if !(uint64(c) <= 0xFFFF) { break } @@ -8783,19 +8953,15 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { } break } - // match: (MOVHZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) + // match: (MOVHZreg z:(ANDconst [c] (MOVBZload ptr x))) // result: z for { z := v_0 - if z.Op != OpSelect0 { + if z.Op != OpPPC64ANDconst { break } z_0 := z.Args[0] - if z_0.Op != OpPPC64ANDCCconst { - break - } - z_0_0 := z_0.Args[0] - if z_0_0.Op != OpPPC64MOVBZload { + if z_0.Op != OpPPC64MOVBZload { break } v.copyOf(z) @@ -8820,19 +8986,15 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { } break } - // match: (MOVHZreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) + // match: (MOVHZreg z:(ANDconst [c] (MOVHZload ptr x))) // result: z for { z := v_0 - if z.Op != OpSelect0 { + if z.Op != OpPPC64ANDconst { break } z_0 := z.Args[0] - if z_0.Op != OpPPC64ANDCCconst { - break - } - z_0_0 := z_0.Args[0] - if z_0_0.Op != OpPPC64MOVHZload { + if z_0.Op != OpPPC64MOVHZload { break } v.copyOf(z) @@ -9023,19 +9185,15 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MOVHreg y:(Select0 (ANDCCconst [c] _))) + // match: (MOVHreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0x7FFF // result: y for { y := v_0 - if y.Op != OpSelect0 { - break - } - y_0 := y.Args[0] - if y_0.Op != OpPPC64ANDCCconst { + if y.Op != OpPPC64ANDconst { break } - c := auxIntToInt64(y_0.AuxInt) + c := auxIntToInt64(y.AuxInt) if !(uint64(c) <= 0x7FFF) { break } @@ -9814,19 +9972,15 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MOVWZreg y:(Select0 (ANDCCconst [c] _))) + // match: (MOVWZreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0xFFFFFFFF // result: y for 
{ y := v_0 - if y.Op != OpSelect0 { - break - } - y_0 := y.Args[0] - if y_0.Op != OpPPC64ANDCCconst { + if y.Op != OpPPC64ANDconst { break } - c := auxIntToInt64(y_0.AuxInt) + c := auxIntToInt64(y.AuxInt) if !(uint64(c) <= 0xFFFFFFFF) { break } @@ -10108,19 +10262,15 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { } break } - // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) + // match: (MOVWZreg z:(ANDconst [c] (MOVBZload ptr x))) // result: z for { z := v_0 - if z.Op != OpSelect0 { + if z.Op != OpPPC64ANDconst { break } z_0 := z.Args[0] - if z_0.Op != OpPPC64ANDCCconst { - break - } - z_0_0 := z_0.Args[0] - if z_0_0.Op != OpPPC64MOVBZload { + if z_0.Op != OpPPC64MOVBZload { break } v.copyOf(z) @@ -10145,37 +10295,29 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { } break } - // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) + // match: (MOVWZreg z:(ANDconst [c] (MOVHZload ptr x))) // result: z for { z := v_0 - if z.Op != OpSelect0 { + if z.Op != OpPPC64ANDconst { break } z_0 := z.Args[0] - if z_0.Op != OpPPC64ANDCCconst { - break - } - z_0_0 := z_0.Args[0] - if z_0_0.Op != OpPPC64MOVHZload { + if z_0.Op != OpPPC64MOVHZload { break } v.copyOf(z) return true } - // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x)))) + // match: (MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) // result: z for { z := v_0 - if z.Op != OpSelect0 { + if z.Op != OpPPC64ANDconst { break } z_0 := z.Args[0] - if z_0.Op != OpPPC64ANDCCconst { - break - } - z_0_0 := z_0.Args[0] - if z_0_0.Op != OpPPC64MOVWZload { + if z_0.Op != OpPPC64MOVWZload { break } v.copyOf(z) @@ -10400,19 +10542,15 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MOVWreg y:(Select0 (ANDCCconst [c] _))) + // match: (MOVWreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0xFFFF // result: y for { y := v_0 - if y.Op != OpSelect0 { + if y.Op != OpPPC64ANDconst { break } - y_0 := y.Args[0] - if y_0.Op != OpPPC64ANDCCconst { - break - } - c := auxIntToInt64(y_0.AuxInt) + c := auxIntToInt64(y.AuxInt) if !(uint64(c) <= 0xFFFF) { break } @@ -11379,20 +11517,16 @@ func rewriteValuePPC64_OpPPC64RLWINM(v *Value) bool { v.AddArg(u) return true } - // match: (RLWINM [r] (Select0 (ANDCCconst [a] u))) + // match: (RLWINM [r] (ANDconst [a] u)) // cond: mergePPC64RlwinmAnd(r,uint32(a)) != 0 // result: (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u) for { r := auxIntToInt64(v.AuxInt) - if v_0.Op != OpSelect0 { + if v_0.Op != OpPPC64ANDconst { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst { - break - } - a := auxIntToInt64(v_0_0.AuxInt) - u := v_0_0.Args[0] + a := auxIntToInt64(v_0.AuxInt) + u := v_0.Args[0] if !(mergePPC64RlwinmAnd(r, uint32(a)) != 0) { break } @@ -11468,20 +11602,16 @@ func rewriteValuePPC64_OpPPC64ROTLWconst(v *Value) bool { } break } - // match: (ROTLWconst [r] (Select0 (ANDCCconst [m] x))) + // match: (ROTLWconst [r] (ANDconst [m] x)) // cond: isPPC64WordRotateMask(m) // result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x) for { r := auxIntToInt64(v.AuxInt) - if v_0.Op != OpSelect0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst { + if v_0.Op != OpPPC64ANDconst { break } - m := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] + m := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] if !(isPPC64WordRotateMask(m)) { break } @@ -11635,25 +11765,19 @@ func rewriteValuePPC64_OpPPC64SETBC(v *Value) bool { v.AddArg(bool) return true } - // match: (SETBC [2] 
(Select1 a:(ANDCCconst [1] _))) - // result: (XORconst [1] (Select0 a)) + // match: (SETBC [2] (CMPconst [0] a:(ANDconst [1] _))) + // result: (XORconst [1] a) for { - if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpSelect1 { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } a := v_0.Args[0] - if a.Op != OpPPC64ANDCCconst { - break - } - t := a.Type - if auxIntToInt64(a.AuxInt) != 1 { + if a.Op != OpPPC64ANDconst || auxIntToInt64(a.AuxInt) != 1 { break } v.reset(OpPPC64XORconst) v.AuxInt = int64ToAuxInt(1) - v0 := b.NewValue0(v.Pos, OpSelect0, t.FieldType(0)) - v0.AddArg(a) - v.AddArg(v0) + v.AddArg(a) return true } // match: (SETBC [2] (CMPconst [0] a:(AND y z))) @@ -11876,18 +12000,17 @@ func rewriteValuePPC64_OpPPC64SETBCR(v *Value) bool { v.AddArg(bool) return true } - // match: (SETBCR [2] (Select1 a:(ANDCCconst [1] _))) - // result: (Select0 a) + // match: (SETBCR [2] (CMPconst [0] a:(ANDconst [1] _))) + // result: a for { - if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpSelect1 { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } a := v_0.Args[0] - if a.Op != OpPPC64ANDCCconst || auxIntToInt64(a.AuxInt) != 1 { + if a.Op != OpPPC64ANDconst || auxIntToInt64(a.AuxInt) != 1 { break } - v.reset(OpSelect0) - v.AddArg(a) + v.copyOf(a) return true } // match: (SETBCR [2] (CMPconst [0] a:(AND y z))) @@ -12077,21 +12200,17 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool { v.AddArg(x) return true } - // match: (SLDconst [c] z:(Select0 (ANDCCconst [d] x))) + // match: (SLDconst [c] z:(ANDconst [d] x)) // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) for { c := auxIntToInt64(v.AuxInt) z := v_0 - if z.Op != OpSelect0 { + if z.Op != OpPPC64ANDconst { break } - z_0 := z.Args[0] - if z_0.Op != OpPPC64ANDCCconst { - break - } - d := auxIntToInt64(z_0.AuxInt) - x := z_0.Args[0] + d := auxIntToInt64(z.AuxInt) + x := z.Args[0] if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) { break } @@ -12217,21 +12336,17 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool { v.AddArg(x) return true } - // match: (SLWconst [c] z:(Select0 (ANDCCconst [d] x))) + // match: (SLWconst [c] z:(ANDconst [d] x)) // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) for { c := auxIntToInt64(v.AuxInt) z := v_0 - if z.Op != OpSelect0 { - break - } - z_0 := z.Args[0] - if z_0.Op != OpPPC64ANDCCconst { + if z.Op != OpPPC64ANDconst { break } - d := auxIntToInt64(z_0.AuxInt) - x := z_0.Args[0] + d := auxIntToInt64(z.AuxInt) + x := z.Args[0] if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) { break } @@ -12362,19 +12477,15 @@ func rewriteValuePPC64_OpPPC64SRW(v *Value) bool { } func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool { v_0 := v.Args[0] - // match: (SRWconst (Select0 (ANDCCconst [m] x)) [s]) + // match: (SRWconst (ANDconst [m] x) [s]) // cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0 // result: (MOVDconst [0]) for { s := auxIntToInt64(v.AuxInt) - if v_0.Op != OpSelect0 { + if v_0.Op != OpPPC64ANDconst { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst { - break - } - m := auxIntToInt64(v_0_0.AuxInt) + m := auxIntToInt64(v_0.AuxInt) if !(mergePPC64RShiftMask(m>>uint(s), 
s, 32) == 0) { break } @@ -12382,20 +12493,16 @@ func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool { v.AuxInt = int64ToAuxInt(0) return true } - // match: (SRWconst (Select0 (ANDCCconst [m] x)) [s]) + // match: (SRWconst (ANDconst [m] x) [s]) // cond: mergePPC64AndSrwi(m>>uint(s),s) != 0 // result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x) for { s := auxIntToInt64(v.AuxInt) - if v_0.Op != OpSelect0 { + if v_0.Op != OpPPC64ANDconst { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst { - break - } - m := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] + m := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) { break } @@ -12872,7 +12979,7 @@ func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool { return true } // match: (Rsh16Ux16 x y) - // result: (ISEL [2] (SRD (MOVHZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFF0] y))) + // result: (ISEL [2] (SRD (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF0] y))) for { t := v.Type x := v_0 @@ -12885,8 +12992,9 @@ func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool { v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v2.AuxInt = int64ToAuxInt(0) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v4.AuxInt = int64ToAuxInt(0xFFF0) v4.AddArg(y) v3.AddArg(v4) @@ -13016,7 +13124,7 @@ func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool { return true } // match: (Rsh16Ux8 x y) - // result: (ISEL [2] (SRD (MOVHZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00F0] y))) + // result: (ISEL [2] (SRD (MOVHZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F0] y))) for { t := v.Type x := v_0 @@ -13029,8 +13137,9 @@ func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool { v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v2.AuxInt = int64ToAuxInt(0) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v4.AuxInt = int64ToAuxInt(0x00F0) v4.AddArg(y) v3.AddArg(v4) @@ -13059,7 +13168,7 @@ func rewriteValuePPC64_OpRsh16x16(v *Value) bool { return true } // match: (Rsh16x16 x y) - // result: (ISEL [2] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (Select1 (ANDCCconst [0xFFF0] y))) + // result: (ISEL [2] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0xFFF0] y))) for { t := v.Type x := v_0 @@ -13073,8 +13182,9 @@ func rewriteValuePPC64_OpRsh16x16(v *Value) bool { v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) v2.AuxInt = int64ToAuxInt(15) v2.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v4.AuxInt = int64ToAuxInt(0xFFF0) v4.AddArg(y) v3.AddArg(v4) @@ -13225,7 +13335,7 @@ func rewriteValuePPC64_OpRsh16x8(v *Value) bool { return true } // match: (Rsh16x8 x y) - // result: (ISEL [2] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (Select1 (ANDCCconst [0x00F0] y))) + // result: (ISEL [2] (SRAD (MOVHreg x) y) 
(SRADconst (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0x00F0] y))) for { t := v.Type x := v_0 @@ -13239,8 +13349,9 @@ func rewriteValuePPC64_OpRsh16x8(v *Value) bool { v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) v2.AuxInt = int64ToAuxInt(15) v2.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v4.AuxInt = int64ToAuxInt(0x00F0) v4.AddArg(y) v3.AddArg(v4) @@ -13267,7 +13378,7 @@ func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool { return true } // match: (Rsh32Ux16 x y) - // result: (ISEL [2] (SRW x y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFE0] y))) + // result: (ISEL [2] (SRW x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFE0] y))) for { t := v.Type x := v_0 @@ -13278,8 +13389,9 @@ func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool { v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(0) - v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v3.AuxInt = int64ToAuxInt(0xFFE0) v3.AddArg(y) v2.AddArg(v3) @@ -13397,7 +13509,7 @@ func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool { return true } // match: (Rsh32Ux8 x y) - // result: (ISEL [2] (SRW x y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00E0] y))) + // result: (ISEL [2] (SRW x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00E0] y))) for { t := v.Type x := v_0 @@ -13408,8 +13520,9 @@ func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool { v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(0) - v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v3.AuxInt = int64ToAuxInt(0x00E0) v3.AddArg(y) v2.AddArg(v3) @@ -13436,7 +13549,7 @@ func rewriteValuePPC64_OpRsh32x16(v *Value) bool { return true } // match: (Rsh32x16 x y) - // result: (ISEL [2] (SRAW x y) (SRAWconst x [31]) (Select1 (ANDCCconst [0xFFE0] y))) + // result: (ISEL [2] (SRAW x y) (SRAWconst x [31]) (CMPconst [0] (ANDconst [0xFFE0] y))) for { t := v.Type x := v_0 @@ -13448,8 +13561,9 @@ func rewriteValuePPC64_OpRsh32x16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t) v1.AuxInt = int64ToAuxInt(31) v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v3.AuxInt = int64ToAuxInt(0xFFE0) v3.AddArg(y) v2.AddArg(v3) @@ -13584,7 +13698,7 @@ func rewriteValuePPC64_OpRsh32x8(v *Value) bool { return true } // match: (Rsh32x8 x y) - // result: (ISEL [2] (SRAW x y) (SRAWconst x [31]) (Select1 (ANDCCconst [0x00E0] y))) + // result: (ISEL [2] (SRAW x y) (SRAWconst x [31]) (CMPconst [0] (ANDconst [0x00E0] y))) for { t := v.Type x := v_0 @@ -13596,8 +13710,9 @@ func rewriteValuePPC64_OpRsh32x8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t) 
v1.AuxInt = int64ToAuxInt(31) v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v3.AuxInt = int64ToAuxInt(0x00E0) v3.AddArg(y) v2.AddArg(v3) @@ -13624,7 +13739,7 @@ func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool { return true } // match: (Rsh64Ux16 x y) - // result: (ISEL [2] (SRD x y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFC0] y))) + // result: (ISEL [2] (SRD x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFC0] y))) for { t := v.Type x := v_0 @@ -13635,8 +13750,9 @@ func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool { v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(0) - v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v3.AuxInt = int64ToAuxInt(0xFFC0) v3.AddArg(y) v2.AddArg(v3) @@ -13754,7 +13870,7 @@ func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool { return true } // match: (Rsh64Ux8 x y) - // result: (ISEL [2] (SRD x y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00C0] y))) + // result: (ISEL [2] (SRD x y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00C0] y))) for { t := v.Type x := v_0 @@ -13765,8 +13881,9 @@ func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool { v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(0) - v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v3.AuxInt = int64ToAuxInt(0x00C0) v3.AddArg(y) v2.AddArg(v3) @@ -13793,7 +13910,7 @@ func rewriteValuePPC64_OpRsh64x16(v *Value) bool { return true } // match: (Rsh64x16 x y) - // result: (ISEL [2] (SRAD x y) (SRADconst x [63]) (Select1 (ANDCCconst [0xFFC0] y))) + // result: (ISEL [2] (SRAD x y) (SRADconst x [63]) (CMPconst [0] (ANDconst [0xFFC0] y))) for { t := v.Type x := v_0 @@ -13805,8 +13922,9 @@ func rewriteValuePPC64_OpRsh64x16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) v1.AuxInt = int64ToAuxInt(63) v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v3.AuxInt = int64ToAuxInt(0xFFC0) v3.AddArg(y) v2.AddArg(v3) @@ -13941,7 +14059,7 @@ func rewriteValuePPC64_OpRsh64x8(v *Value) bool { return true } // match: (Rsh64x8 x y) - // result: (ISEL [2] (SRAD x y) (SRADconst x [63]) (Select1 (ANDCCconst [0x00C0] y))) + // result: (ISEL [2] (SRAD x y) (SRADconst x [63]) (CMPconst [0] (ANDconst [0x00C0] y))) for { t := v.Type x := v_0 @@ -13953,8 +14071,9 @@ func rewriteValuePPC64_OpRsh64x8(v *Value) bool { v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) v1.AuxInt = int64ToAuxInt(63) v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, 
OpPPC64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v3.AuxInt = int64ToAuxInt(0x00C0) v3.AddArg(y) v2.AddArg(v3) @@ -13983,7 +14102,7 @@ func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool { return true } // match: (Rsh8Ux16 x y) - // result: (ISEL [2] (SRD (MOVBZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFF8] y))) + // result: (ISEL [2] (SRD (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0xFFF8] y))) for { t := v.Type x := v_0 @@ -13996,8 +14115,9 @@ func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool { v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v2.AuxInt = int64ToAuxInt(0) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v4.AuxInt = int64ToAuxInt(0xFFF8) v4.AddArg(y) v3.AddArg(v4) @@ -14127,7 +14247,7 @@ func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool { return true } // match: (Rsh8Ux8 x y) - // result: (ISEL [2] (SRD (MOVBZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00F8] y))) + // result: (ISEL [2] (SRD (MOVBZreg x) y) (MOVDconst [0]) (CMPconst [0] (ANDconst [0x00F8] y))) for { t := v.Type x := v_0 @@ -14140,8 +14260,9 @@ func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool { v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v2.AuxInt = int64ToAuxInt(0) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v4.AuxInt = int64ToAuxInt(0x00F8) v4.AddArg(y) v3.AddArg(v4) @@ -14170,7 +14291,7 @@ func rewriteValuePPC64_OpRsh8x16(v *Value) bool { return true } // match: (Rsh8x16 x y) - // result: (ISEL [2] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (Select1 (ANDCCconst [0xFFF8] y))) + // result: (ISEL [2] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0xFFF8] y))) for { t := v.Type x := v_0 @@ -14184,8 +14305,9 @@ func rewriteValuePPC64_OpRsh8x16(v *Value) bool { v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) v2.AuxInt = int64ToAuxInt(7) v2.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int) v4.AuxInt = int64ToAuxInt(0xFFF8) v4.AddArg(y) v3.AddArg(v4) @@ -14336,7 +14458,7 @@ func rewriteValuePPC64_OpRsh8x8(v *Value) bool { return true } // match: (Rsh8x8 x y) - // result: (ISEL [2] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (Select1 (ANDCCconst [0x00F8] y))) + // result: (ISEL [2] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0x00F8] y))) for { t := v.Type x := v_0 @@ -14350,8 +14472,9 @@ func rewriteValuePPC64_OpRsh8x8(v *Value) bool { v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) v2.AuxInt = int64ToAuxInt(7) v2.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(0) + v4 := b.NewValue0(v.Pos, 
OpPPC64ANDconst, typ.Int) v4.AuxInt = int64ToAuxInt(0x00F8) v4.AddArg(y) v3.AddArg(v4) @@ -14417,271 +14540,6 @@ func rewriteValuePPC64_OpSelect0(v *Value) bool { v.AddArg(v0) return true } - // match: (Select0 (ANDCCconst [m] (ROTLWconst [r] x))) - // cond: isPPC64WordRotateMask(m) - // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x) - for { - if v_0.Op != OpPPC64ANDCCconst { - break - } - m := auxIntToInt64(v_0.AuxInt) - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ROTLWconst { - break - } - r := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - if !(isPPC64WordRotateMask(m)) { - break - } - v.reset(OpPPC64RLWINM) - v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32)) - v.AddArg(x) - return true - } - // match: (Select0 (ANDCCconst [m] (ROTLW x r))) - // cond: isPPC64WordRotateMask(m) - // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r) - for { - if v_0.Op != OpPPC64ANDCCconst { - break - } - m := auxIntToInt64(v_0.AuxInt) - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ROTLW { - break - } - r := v_0_0.Args[1] - x := v_0_0.Args[0] - if !(isPPC64WordRotateMask(m)) { - break - } - v.reset(OpPPC64RLWNM) - v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32)) - v.AddArg2(x, r) - return true - } - // match: (Select0 (ANDCCconst [m] (SRWconst x [s]))) - // cond: mergePPC64RShiftMask(m,s,32) == 0 - // result: (MOVDconst [0]) - for { - if v_0.Op != OpPPC64ANDCCconst { - break - } - m := auxIntToInt64(v_0.AuxInt) - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64SRWconst { - break - } - s := auxIntToInt64(v_0_0.AuxInt) - if !(mergePPC64RShiftMask(m, s, 32) == 0) { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(0) - return true - } - // match: (Select0 (ANDCCconst [m] (SRWconst x [s]))) - // cond: mergePPC64AndSrwi(m,s) != 0 - // result: (RLWINM [mergePPC64AndSrwi(m,s)] x) - for { - if v_0.Op != OpPPC64ANDCCconst { - break - } - m := auxIntToInt64(v_0.AuxInt) - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64SRWconst { - break - } - s := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - if !(mergePPC64AndSrwi(m, s) != 0) { - break - } - v.reset(OpPPC64RLWINM) - v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s)) - v.AddArg(x) - return true - } - // match: (Select0 (ANDCCconst [-1] x)) - // result: x - for { - if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != -1 { - break - } - x := v_0.Args[0] - v.copyOf(x) - return true - } - // match: (Select0 (ANDCCconst [0] _)) - // result: (MOVDconst [0]) - for { - if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0 { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(0) - return true - } - // match: (Select0 (ANDCCconst [c] y:(MOVBZreg _))) - // cond: c&0xFF == 0xFF - // result: y - for { - if v_0.Op != OpPPC64ANDCCconst { - break - } - c := auxIntToInt64(v_0.AuxInt) - y := v_0.Args[0] - if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) { - break - } - v.copyOf(y) - return true - } - // match: (Select0 (ANDCCconst [0xFF] (MOVBreg x))) - // result: (MOVBZreg x) - for { - if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFF { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64MOVBreg { - break - } - x := v_0_0.Args[0] - v.reset(OpPPC64MOVBZreg) - v.AddArg(x) - return true - } - // match: (Select0 (ANDCCconst [c] y:(MOVHZreg _))) - // cond: c&0xFFFF == 0xFFFF - // result: y - for { - if v_0.Op != OpPPC64ANDCCconst { - break - } - c := auxIntToInt64(v_0.AuxInt) - y := v_0.Args[0] - if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) { - break - } - v.copyOf(y) - return 
true - } - // match: (Select0 (ANDCCconst [0xFFFF] (MOVHreg x))) - // result: (MOVHZreg x) - for { - if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFFFF { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64MOVHreg { - break - } - x := v_0_0.Args[0] - v.reset(OpPPC64MOVHZreg) - v.AddArg(x) - return true - } - // match: (Select0 (ANDCCconst [c] (MOVBZreg x))) - // result: (Select0 (ANDCCconst [c&0xFF] x)) - for { - if v_0.Op != OpPPC64ANDCCconst { - break - } - c := auxIntToInt64(v_0.AuxInt) - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64MOVBZreg { - break - } - x := v_0_0.Args[0] - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v0.AuxInt = int64ToAuxInt(c & 0xFF) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Select0 (ANDCCconst [c] (MOVHZreg x))) - // result: (Select0 (ANDCCconst [c&0xFFFF] x)) - for { - if v_0.Op != OpPPC64ANDCCconst { - break - } - c := auxIntToInt64(v_0.AuxInt) - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64MOVHZreg { - break - } - x := v_0_0.Args[0] - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v0.AuxInt = int64ToAuxInt(c & 0xFFFF) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Select0 (ANDCCconst [c] (MOVWZreg x))) - // result: (Select0 (ANDCCconst [c&0xFFFFFFFF] x)) - for { - if v_0.Op != OpPPC64ANDCCconst { - break - } - c := auxIntToInt64(v_0.AuxInt) - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64MOVWZreg { - break - } - x := v_0_0.Args[0] - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v0.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Select0 (ANDCCconst [m] (RLWINM [r] y))) - // cond: mergePPC64AndRlwinm(uint32(m),r) != 0 - // result: (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y) - for { - if v_0.Op != OpPPC64ANDCCconst { - break - } - m := auxIntToInt64(v_0.AuxInt) - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64RLWINM { - break - } - r := auxIntToInt64(v_0_0.AuxInt) - y := v_0_0.Args[0] - if !(mergePPC64AndRlwinm(uint32(m), r) != 0) { - break - } - v.reset(OpPPC64RLWINM) - v.AuxInt = int64ToAuxInt(mergePPC64AndRlwinm(uint32(m), r)) - v.AddArg(y) - return true - } - // match: (Select0 (ANDCCconst [1] z:(SRADconst [63] x))) - // cond: z.Uses == 1 - // result: (SRDconst [63] x) - for { - if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 1 { - break - } - z := v_0.Args[0] - if z.Op != OpPPC64SRADconst || auxIntToInt64(z.AuxInt) != 63 { - break - } - x := z.Args[0] - if !(z.Uses == 1) { - break - } - v.reset(OpPPC64SRDconst) - v.AuxInt = int64ToAuxInt(63) - v.AddArg(x) - return true - } return false } func rewriteValuePPC64_OpSelect1(v *Value) bool { @@ -14786,15 +14644,6 @@ func rewriteValuePPC64_OpSelect1(v *Value) bool { v.copyOf(x) return true } - // match: (Select1 (ANDCCconst [0] _)) - // result: (FlagEQ) - for { - if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0 { - break - } - v.reset(OpPPC64FlagEQ) - return true - } return false } func rewriteValuePPC64_OpSelectN(v *Value) bool { @@ -15421,46 +15270,6 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64EQ, cmp) return true } - // match: (EQ (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) - // result: (EQ (Select1 z) yes no) - for b.Controls[0].Op == OpPPC64CMPconst { - v_0 := b.Controls[0] - if auxIntToInt64(v_0.AuxInt) != 0 { - break - } - v_0_0 := v_0.Args[0] - 
if v_0_0.Op != OpSelect0 { - break - } - z := v_0_0.Args[0] - if z.Op != OpPPC64ANDCCconst { - break - } - v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) - v0.AddArg(z) - b.resetWithControl(BlockPPC64EQ, v0) - return true - } - // match: (EQ (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) - // result: (EQ (Select1 z) yes no) - for b.Controls[0].Op == OpPPC64CMPWconst { - v_0 := b.Controls[0] - if auxIntToInt32(v_0.AuxInt) != 0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSelect0 { - break - } - z := v_0_0.Args[0] - if z.Op != OpPPC64ANDCCconst { - break - } - v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) - v0.AddArg(z) - b.resetWithControl(BlockPPC64EQ, v0) - return true - } // match: (EQ (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (EQ (Select1 (ANDCC x y)) yes no) @@ -15579,46 +15388,6 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64LE, cmp) return true } - // match: (GE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) - // result: (GE (Select1 z) yes no) - for b.Controls[0].Op == OpPPC64CMPconst { - v_0 := b.Controls[0] - if auxIntToInt64(v_0.AuxInt) != 0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSelect0 { - break - } - z := v_0_0.Args[0] - if z.Op != OpPPC64ANDCCconst { - break - } - v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) - v0.AddArg(z) - b.resetWithControl(BlockPPC64GE, v0) - return true - } - // match: (GE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) - // result: (GE (Select1 z) yes no) - for b.Controls[0].Op == OpPPC64CMPWconst { - v_0 := b.Controls[0] - if auxIntToInt32(v_0.AuxInt) != 0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSelect0 { - break - } - z := v_0_0.Args[0] - if z.Op != OpPPC64ANDCCconst { - break - } - v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) - v0.AddArg(z) - b.resetWithControl(BlockPPC64GE, v0) - return true - } // match: (GE (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (GE (Select1 (ANDCC x y)) yes no) @@ -15738,46 +15507,6 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64LT, cmp) return true } - // match: (GT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) - // result: (GT (Select1 z) yes no) - for b.Controls[0].Op == OpPPC64CMPconst { - v_0 := b.Controls[0] - if auxIntToInt64(v_0.AuxInt) != 0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSelect0 { - break - } - z := v_0_0.Args[0] - if z.Op != OpPPC64ANDCCconst { - break - } - v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) - v0.AddArg(z) - b.resetWithControl(BlockPPC64GT, v0) - return true - } - // match: (GT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) - // result: (GT (Select1 z) yes no) - for b.Controls[0].Op == OpPPC64CMPWconst { - v_0 := b.Controls[0] - if auxIntToInt32(v_0.AuxInt) != 0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSelect0 { - break - } - z := v_0_0.Args[0] - if z.Op != OpPPC64ANDCCconst { - break - } - v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) - v0.AddArg(z) - b.resetWithControl(BlockPPC64GT, v0) - return true - } // match: (GT (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (GT (Select1 (ANDCC x y)) yes no) @@ -15950,11 +15679,12 @@ func rewriteBlockPPC64(b *Block) bool { return true } // match: (If cond yes no) - // result: (NE (Select1 (ANDCCconst [1] cond)) yes no) + // result: (NE (CMPconst [0] (ANDconst [1] cond)) yes no) for { cond := b.Controls[0] - v0 := b.NewValue0(cond.Pos, OpSelect1, types.TypeFlags) - v1 := 
b.NewValue0(cond.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0 := b.NewValue0(cond.Pos, OpPPC64CMPconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(cond.Pos, OpPPC64ANDconst, typ.Int) v1.AuxInt = int64ToAuxInt(1) v1.AddArg(cond) v0.AddArg(v1) @@ -15989,46 +15719,6 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64GE, cmp) return true } - // match: (LE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) - // result: (LE (Select1 z) yes no) - for b.Controls[0].Op == OpPPC64CMPconst { - v_0 := b.Controls[0] - if auxIntToInt64(v_0.AuxInt) != 0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSelect0 { - break - } - z := v_0_0.Args[0] - if z.Op != OpPPC64ANDCCconst { - break - } - v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) - v0.AddArg(z) - b.resetWithControl(BlockPPC64LE, v0) - return true - } - // match: (LE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) - // result: (LE (Select1 z) yes no) - for b.Controls[0].Op == OpPPC64CMPWconst { - v_0 := b.Controls[0] - if auxIntToInt32(v_0.AuxInt) != 0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSelect0 { - break - } - z := v_0_0.Args[0] - if z.Op != OpPPC64ANDCCconst { - break - } - v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) - v0.AddArg(z) - b.resetWithControl(BlockPPC64LE, v0) - return true - } // match: (LE (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (LE (Select1 (ANDCC x y)) yes no) @@ -16148,46 +15838,6 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64GT, cmp) return true } - // match: (LT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) - // result: (LT (Select1 z) yes no) - for b.Controls[0].Op == OpPPC64CMPconst { - v_0 := b.Controls[0] - if auxIntToInt64(v_0.AuxInt) != 0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSelect0 { - break - } - z := v_0_0.Args[0] - if z.Op != OpPPC64ANDCCconst { - break - } - v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) - v0.AddArg(z) - b.resetWithControl(BlockPPC64LT, v0) - return true - } - // match: (LT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) - // result: (LT (Select1 z) yes no) - for b.Controls[0].Op == OpPPC64CMPWconst { - v_0 := b.Controls[0] - if auxIntToInt32(v_0.AuxInt) != 0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSelect0 { - break - } - z := v_0_0.Args[0] - if z.Op != OpPPC64ANDCCconst { - break - } - v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) - v0.AddArg(z) - b.resetWithControl(BlockPPC64LT, v0) - return true - } // match: (LT (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (LT (Select1 (ANDCC x y)) yes no) @@ -16279,12 +15929,15 @@ func rewriteBlockPPC64(b *Block) bool { break } case BlockPPC64NE: - // match: (NE (Select1 (ANDCCconst [1] (Equal cc))) yes no) + // match: (NE (CMPconst [0] (ANDconst [1] (Equal cc))) yes no) // result: (EQ cc yes no) - for b.Controls[0].Op == OpSelect1 { + for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { break } v_0_0_0 := v_0_0.Args[0] @@ -16295,12 +15948,15 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64EQ, cc) return true } - // match: (NE (Select1 (ANDCCconst [1] (NotEqual cc))) yes no) + // match: (NE (CMPconst [0] (ANDconst [1] (NotEqual cc))) yes no) // result: (NE 
cc yes no) - for b.Controls[0].Op == OpSelect1 { + for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { break } v_0_0_0 := v_0_0.Args[0] @@ -16311,12 +15967,15 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64NE, cc) return true } - // match: (NE (Select1 (ANDCCconst [1] (LessThan cc))) yes no) + // match: (NE (CMPconst [0] (ANDconst [1] (LessThan cc))) yes no) // result: (LT cc yes no) - for b.Controls[0].Op == OpSelect1 { + for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { break } v_0_0_0 := v_0_0.Args[0] @@ -16327,12 +15986,15 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64LT, cc) return true } - // match: (NE (Select1 (ANDCCconst [1] (LessEqual cc))) yes no) + // match: (NE (CMPconst [0] (ANDconst [1] (LessEqual cc))) yes no) // result: (LE cc yes no) - for b.Controls[0].Op == OpSelect1 { + for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { break } v_0_0_0 := v_0_0.Args[0] @@ -16343,12 +16005,15 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64LE, cc) return true } - // match: (NE (Select1 (ANDCCconst [1] (GreaterThan cc))) yes no) + // match: (NE (CMPconst [0] (ANDconst [1] (GreaterThan cc))) yes no) // result: (GT cc yes no) - for b.Controls[0].Op == OpSelect1 { + for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { break } v_0_0_0 := v_0_0.Args[0] @@ -16359,12 +16024,15 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64GT, cc) return true } - // match: (NE (Select1 (ANDCCconst [1] (GreaterEqual cc))) yes no) + // match: (NE (CMPconst [0] (ANDconst [1] (GreaterEqual cc))) yes no) // result: (GE cc yes no) - for b.Controls[0].Op == OpSelect1 { + for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { break } v_0_0_0 := v_0_0.Args[0] @@ -16375,12 +16043,15 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64GE, cc) return true } - // match: (NE (Select1 (ANDCCconst [1] (FLessThan cc))) yes no) + // match: (NE (CMPconst [0] (ANDconst [1] (FLessThan cc))) yes no) // result: (FLT cc yes no) - for b.Controls[0].Op == OpSelect1 { + for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { break } v_0_0_0 := v_0_0.Args[0] @@ -16391,12 +16062,15 @@ func 
rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64FLT, cc) return true } - // match: (NE (Select1 (ANDCCconst [1] (FLessEqual cc))) yes no) + // match: (NE (CMPconst [0] (ANDconst [1] (FLessEqual cc))) yes no) // result: (FLE cc yes no) - for b.Controls[0].Op == OpSelect1 { + for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { break } v_0_0_0 := v_0_0.Args[0] @@ -16407,12 +16081,15 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64FLE, cc) return true } - // match: (NE (Select1 (ANDCCconst [1] (FGreaterThan cc))) yes no) + // match: (NE (CMPconst [0] (ANDconst [1] (FGreaterThan cc))) yes no) // result: (FGT cc yes no) - for b.Controls[0].Op == OpSelect1 { + for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { break } v_0_0_0 := v_0_0.Args[0] @@ -16423,12 +16100,15 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64FGT, cc) return true } - // match: (NE (Select1 (ANDCCconst [1] (FGreaterEqual cc))) yes no) + // match: (NE (CMPconst [0] (ANDconst [1] (FGreaterEqual cc))) yes no) // result: (FGE cc yes no) - for b.Controls[0].Op == OpSelect1 { + for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { break } v_0_0_0 := v_0_0.Args[0] @@ -16466,46 +16146,6 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64NE, cmp) return true } - // match: (NE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) - // result: (NE (Select1 z) yes no) - for b.Controls[0].Op == OpPPC64CMPconst { - v_0 := b.Controls[0] - if auxIntToInt64(v_0.AuxInt) != 0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSelect0 { - break - } - z := v_0_0.Args[0] - if z.Op != OpPPC64ANDCCconst { - break - } - v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) - v0.AddArg(z) - b.resetWithControl(BlockPPC64NE, v0) - return true - } - // match: (NE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) - // result: (NE (Select1 z) yes no) - for b.Controls[0].Op == OpPPC64CMPWconst { - v_0 := b.Controls[0] - if auxIntToInt32(v_0.AuxInt) != 0 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpSelect0 { - break - } - z := v_0_0.Args[0] - if z.Op != OpPPC64ANDCCconst { - break - } - v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) - v0.AddArg(z) - b.resetWithControl(BlockPPC64NE, v0) - return true - } // match: (NE (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (NE (Select1 (ANDCC x y)) yes no) diff --git a/src/cmd/compile/internal/ssa/rewritePPC64latelower.go b/src/cmd/compile/internal/ssa/rewritePPC64latelower.go index 771dd6aaa2..23d8601fb4 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64latelower.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64latelower.go @@ -3,7 +3,6 @@ package ssa import "internal/buildcfg" -import "cmd/compile/internal/types" func rewriteValuePPC64latelower(v *Value) bool { switch v.Op { @@ -11,18 +10,20 @@ func 
rewriteValuePPC64latelower(v *Value) bool { return rewriteValuePPC64latelower_OpPPC64ADD(v) case OpPPC64AND: return rewriteValuePPC64latelower_OpPPC64AND(v) + case OpPPC64ANDconst: + return rewriteValuePPC64latelower_OpPPC64ANDconst(v) case OpPPC64CMPconst: return rewriteValuePPC64latelower_OpPPC64CMPconst(v) case OpPPC64ISEL: return rewriteValuePPC64latelower_OpPPC64ISEL(v) case OpPPC64RLDICL: return rewriteValuePPC64latelower_OpPPC64RLDICL(v) + case OpPPC64RLDICLCC: + return rewriteValuePPC64latelower_OpPPC64RLDICLCC(v) case OpPPC64SETBC: return rewriteValuePPC64latelower_OpPPC64SETBC(v) case OpPPC64SETBCR: return rewriteValuePPC64latelower_OpPPC64SETBCR(v) - case OpSelect0: - return rewriteValuePPC64latelower_OpSelect0(v) } return false } @@ -54,11 +55,9 @@ func rewriteValuePPC64latelower_OpPPC64ADD(v *Value) bool { func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (AND x:(MOVDconst [m]) n) // cond: t.Size() <= 2 - // result: (Select0 (ANDCCconst [int64(int16(m))] n)) + // result: (ANDconst [int64(int16(m))] n) for { t := v.Type for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -71,11 +70,9 @@ func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool { if !(t.Size() <= 2) { continue } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v0.AuxInt = int64ToAuxInt(int64(int16(m))) - v0.AddArg(n) - v.AddArg(v0) + v.reset(OpPPC64ANDconst) + v.AuxInt = int64ToAuxInt(int64(int16(m))) + v.AddArg(n) return true } break @@ -146,6 +143,24 @@ func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool { } return false } +func rewriteValuePPC64latelower_OpPPC64ANDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDconst [m] x) + // cond: isPPC64ValidShiftMask(m) + // result: (RLDICL [encodePPC64RotateMask(0,m,64)] x) + for { + m := auxIntToInt64(v.AuxInt) + x := v_0 + if !(isPPC64ValidShiftMask(m)) { + break + } + v.reset(OpPPC64RLDICL) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64)) + v.AddArg(x) + return true + } + return false +} func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool { v_0 := v.Args[0] // match: (CMPconst [0] z:(ADD x y)) @@ -319,6 +334,25 @@ func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool { v.AddArg(convertPPC64OpToOpCC(z)) return true } + // match: (CMPconst [0] z:(RLDICL x)) + // cond: v.Block == z.Block + // result: (CMPconst [0] convertPPC64OpToOpCC(z)) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + z := v_0 + if z.Op != OpPPC64RLDICL { + break + } + if !(v.Block == z.Block) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(convertPPC64OpToOpCC(z)) + return true + } // match: (CMPconst [0] z:(ADDconst [c] x)) // cond: int64(int16(c)) == c && v.Block == z.Block // result: (CMPconst [0] convertPPC64OpToOpCC(z)) @@ -339,6 +373,26 @@ func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool { v.AddArg(convertPPC64OpToOpCC(z)) return true } + // match: (CMPconst [0] z:(ANDconst [c] x)) + // cond: int64(uint16(c)) == c && v.Block == z.Block + // result: (CMPconst [0] convertPPC64OpToOpCC(z)) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + z := v_0 + if z.Op != OpPPC64ANDconst { + break + } + c := auxIntToInt64(z.AuxInt) + if !(int64(uint16(c)) == c && v.Block == z.Block) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(convertPPC64OpToOpCC(z)) + return true + } // match: (CMPconst [0] 
(Select0 z:(ADDCC x y))) // result: (Select1 z) for { @@ -467,6 +521,22 @@ func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool { v.AddArg(z) return true } + // match: (CMPconst [0] (Select0 z:(ANDCCconst y))) + // result: (Select1 z) + for { + t := v.Type + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64ANDCCconst { + break + } + v.reset(OpSelect1) + v.Type = t + v.AddArg(z) + return true + } // match: (CMPconst [0] (Select0 z:(NEGCC y))) // result: (Select1 z) for { @@ -499,6 +569,22 @@ func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool { v.AddArg(z) return true } + // match: (CMPconst [0] (Select0 z:(RLDICLCC y))) + // result: (Select1 z) + for { + t := v.Type + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64RLDICLCC { + break + } + v.reset(OpSelect1) + v.Type = t + v.AddArg(z) + return true + } return false } func rewriteValuePPC64latelower_OpPPC64ISEL(v *Value) bool { @@ -558,6 +644,24 @@ func rewriteValuePPC64latelower_OpPPC64RLDICL(v *Value) bool { } return false } +func rewriteValuePPC64latelower_OpPPC64RLDICLCC(v *Value) bool { + v_0 := v.Args[0] + // match: (RLDICLCC [a] x) + // cond: convertPPC64RldiclAndccconst(a) != 0 + // result: (ANDCCconst [convertPPC64RldiclAndccconst(a)] x) + for { + a := auxIntToInt64(v.AuxInt) + x := v_0 + if !(convertPPC64RldiclAndccconst(a) != 0) { + break + } + v.reset(OpPPC64ANDCCconst) + v.AuxInt = int64ToAuxInt(convertPPC64RldiclAndccconst(a)) + v.AddArg(x) + return true + } + return false +} func rewriteValuePPC64latelower_OpPPC64SETBC(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -678,28 +782,6 @@ func rewriteValuePPC64latelower_OpPPC64SETBCR(v *Value) bool { } return false } -func rewriteValuePPC64latelower_OpSelect0(v *Value) bool { - v_0 := v.Args[0] - // match: (Select0 z:(ANDCCconst [m] x)) - // cond: z.Uses == 1 && isPPC64ValidShiftMask(m) - // result: (RLDICL [encodePPC64RotateMask(0,m,64)] x) - for { - z := v_0 - if z.Op != OpPPC64ANDCCconst { - break - } - m := auxIntToInt64(z.AuxInt) - x := z.Args[0] - if !(z.Uses == 1 && isPPC64ValidShiftMask(m)) { - break - } - v.reset(OpPPC64RLDICL) - v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64)) - v.AddArg(x) - return true - } - return false -} func rewriteBlockPPC64latelower(b *Block) bool { return false }
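
The ISEL lowerings above all share one trick: for 8- and 16-bit shift counts there is no suitable compare-immediate form, so the rules test the out-of-range bits of the count with a masked AND. A Go sketch of the equivalence the (Rsh64Ux16 x y) rule encodes (the function name is illustrative, not from the source):

    // The 0xFFC0 mask covers bits 6..15 of the 16-bit count, so it is
    // non-zero exactly when the shift amount is 64 or more; in that case
    // the rule selects the (MOVDconst [0]) arm of the ISEL.
    func rsh64Ux16(x uint64, y uint16) uint64 {
        if y&0xFFC0 != 0 { // count >= 64
            return 0
        }
        return x >> y
    }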
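The new rewriteValuePPC64latelower_OpPPC64ANDconst case only fires when isPPC64ValidShiftMask(m) holds. Judging from its use here (an inference from the rule, not a copy of the helper), the test accepts masks that RLDICL can synthesize with a zero rotate count, i.e. a contiguous run of ones starting at bit 0:

    // validShiftMask is a hypothetical stand-in for isPPC64ValidShiftMask:
    // m+1 clears the run of low ones, so (m+1)&m == 0 holds iff m is a
    // contiguous run of ones starting at bit 0.
    func validShiftMask(m int64) bool {
        return m != 0 && (m+1)&m == 0
    }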
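Together, the CMPconst [0] rules in latelower absorb a compare against zero into the record form of the masking instruction: (CMPconst [0] z:(ANDconst [c] x)) and (CMPconst [0] z:(RLDICL x)) both become a compare of convertPPC64OpToOpCC(z), whose CR0 result then replaces the compare entirely via the Select0/Select1 rules. For a source pattern like the following, one would therefore expect a single record-form mask instruction setting CR0 rather than an AND followed by a separate compare (the function is an illustrative example, not from the CL):

    // maskedIsZero: the x&0xFF == 0 test matches
    // (CMPconst [0] (ANDconst [0xFF] x)), which latelower converts
    // to the CC opcode and a Select1 of its flags.
    func maskedIsZero(x uint64) bool {
        return x&0xFF == 0
    }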
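The SETBC [2] and SETBCR [2] rewrites rely on the operand already being 0 or 1: when a is (ANDconst [1] _), materializing "a equals 0" is just the low bit flipped, and "a is non-zero" is a itself, which is why the SETBCR case can copyOf(a) directly. In Go terms (names illustrative, assuming a carries a single-bit value):

    // Identity behind (SETBC [2] (CMPconst [0] a:(ANDconst [1] _)))
    // => (XORconst [1] a): with a in {0, 1}, testing it for zero is
    // the same as xor-ing it with 1.
    func lowBitIsZero(a uint64) uint64 {
        return (a & 1) ^ 1
    }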