From: Lynn Boger
Date: Thu, 18 Aug 2022 20:35:38 +0000 (-0500)
Subject: cmd/compile: leverage cc ops in more cases on ppc64x
X-Git-Tag: go1.20rc1~713
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=d0b0b10b5cbb28d53403c2bd6af343581327e946;p=gostls13.git

cmd/compile: leverage cc ops in more cases on ppc64x

This updates some rules to use the CC variants of ops, which set the
condition code based on the result of the operation. The compare with
zero that follows can then be removed, since the equivalent condition
code has already been set.

In addition, a previous rule change to use ANDCCconst was modified to
allow any constant value, not just 1, in some cases.

Improvements in the reflect package benchmarks:

DeepEqual/int8-4             23.9ns ± 1%  23.1ns ± 1%  -3.57%  (p=0.029 n=4+4)
DeepEqual/[]int8-4            109ns ± 2%   102ns ± 1%  -6.67%  (p=0.029 n=4+4)
DeepEqual/int16-4            23.8ns ± 1%  22.8ns ± 0%  -3.97%  (p=0.029 n=4+4)
DeepEqual/[]int16-4           108ns ± 1%   102ns ± 0%  -6.25%  (p=0.029 n=4+4)
DeepEqual/int32-4            24.9ns ± 3%  23.6ns ± 0%  -5.09%  (p=0.029 n=4+4)
DeepEqual/[]int32-4           109ns ± 1%   103ns ± 0%  -5.64%  (p=0.029 n=4+4)
DeepEqual/int64-4            25.5ns ± 1%  23.7ns ± 0%  -7.03%  (p=0.029 n=4+4)
DeepEqual/[]int64-4           109ns ± 1%   102ns ± 0%  -6.73%  (p=0.029 n=4+4)
DeepEqual/int-4              23.2ns ± 1%  22.7ns ± 0%  -2.05%  (p=0.029 n=4+4)
DeepEqual/[]int-4             109ns ± 3%   101ns ± 0%  -7.18%  (p=0.029 n=4+4)
DeepEqual/uint8-4            23.9ns ± 1%  23.5ns ± 0%  -1.69%  (p=0.029 n=4+4)
DeepEqual/[]uint8-4          89.1ns ± 0%  85.6ns ± 1%  -3.95%  (p=0.029 n=4+4)
DeepEqual/uint16-4           24.0ns ± 1%  23.8ns ± 0%  -0.76%  (p=0.343 n=4+4)
DeepEqual/[]uint16-4          111ns ± 0%   106ns ± 4%  -4.74%  (p=0.029 n=4+4)
DeepEqual/uint32-4           23.5ns ± 1%  23.0ns ± 0%  -2.15%  (p=0.029 n=4+4)
DeepEqual/[]uint32-4          110ns ± 1%   104ns ± 0%  -5.66%  (p=0.029 n=4+4)
DeepEqual/uint64-4           24.6ns ± 1%  24.3ns ± 0%  -1.10%  (p=0.143 n=4+4)
DeepEqual/[]uint64-4          111ns ± 0%   105ns ± 1%  -5.16%  (p=0.029 n=4+4)
DeepEqual/uint-4             23.6ns ± 0%  23.0ns ± 0%  -2.70%  (p=0.029 n=4+4)
DeepEqual/[]uint-4            109ns ± 0%   103ns ± 1%  -5.74%  (p=0.029 n=4+4)
DeepEqual/uintptr-4          25.1ns ± 1%  24.8ns ± 2%  -1.11%  (p=0.171 n=4+4)
DeepEqual/[]uintptr-4         111ns ± 0%   106ns ± 1%  -4.45%  (p=0.029 n=4+4)
DeepEqual/float32-4          22.5ns ± 0%  22.2ns ± 0%  -1.29%  (p=0.029 n=4+4)
DeepEqual/[]float32-4         105ns ± 0%   101ns ± 1%  -3.75%  (p=0.029 n=4+4)
DeepEqual/float64-4          22.7ns ± 2%  22.1ns ± 0%  -2.52%  (p=0.029 n=4+4)
DeepEqual/[]float64-4         105ns ± 1%   103ns ± 1%  -2.77%  (p=0.029 n=4+4)
DeepEqual/complex64-4        22.9ns ± 0%  22.8ns ± 0%  -0.48%  (p=0.029 n=4+4)
DeepEqual/[]complex64-4       107ns ± 0%   101ns ± 0%  -5.48%  (p=0.029 n=4+4)
DeepEqual/complex128-4       23.2ns ± 1%  22.6ns ± 0%  -2.34%  (p=0.029 n=4+4)
DeepEqual/[]complex128-4      107ns ± 0%   101ns ± 0%  -5.60%  (p=0.029 n=4+4)
DeepEqual/bool-4             22.0ns ± 1%  21.7ns ± 0%  -1.44%  (p=0.029 n=4+4)
DeepEqual/[]bool-4            106ns ± 1%   100ns ± 0%  -5.42%  (p=0.029 n=4+4)
DeepEqual/string-4           26.7ns ± 1%  24.7ns ± 0%  -7.47%  (p=0.029 n=4+4)
DeepEqual/[]string-4          112ns ± 0%   107ns ± 0%  -4.21%  (p=0.029 n=4+4)
DeepEqual/[]uint8#01-4       89.4ns ± 1%  85.5ns ± 1%  -4.44%  (p=0.029 n=4+4)
DeepEqual/[][]uint8-4         177ns ± 0%   173ns ± 1%  -2.22%  (p=0.029 n=4+4)
DeepEqual/[6]uint8-4          137ns ± 1%   137ns ± 0%  -0.56%  (p=0.057 n=4+4)
DeepEqual/[][6]uint8-4        232ns ± 0%   230ns ± 1%  -1.09%  (p=0.029 n=4+4)

Change-Id: I275624e21dc4d70001032be48897f1504cbfdd1c
Reviewed-on: https://go-review.googlesource.com/c/go/+/427634
Reviewed-by: Paul Murphy
Reviewed-by: Dmitri Shuralyov
Reviewed-by: Than McIntosh
Reviewed-by: Archana Ravindar
Run-TryBot: Lynn Boger
TryBot-Result: Gopher Robot
---
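For illustration only (this note is not part of the original commit message): a minimal Go
sketch of the code patterns these rules target. The instruction sequences in the comments are
assumptions about typical ppc64x output, not captured listings, and the function names are made
up for the example.

package p

// Branching on an AND result: previously the AND was followed by an
// explicit compare against zero; with the CC-setting form the flags
// produced by the operation itself can feed the branch.
//
//	assumed before: AND R4, R3, R5; CMP R5, $0; BEQ ...
//	assumed after:  ANDCC R4, R3, R5; BEQ ...
func andBranch(x, y int64) int64 {
	if x&y == 0 {
		return 1
	}
	return 0
}

// Materializing a bool from a masked test is the kind of code the new
// ISELB rules target, folding the compare with zero into ANDCC/ORCC/XORCC.
func maskIsZero(x, y int64) bool {
	return x&y == 0
}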
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index 429da7dcd5..aa3620f56b 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -611,7 +611,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.From.Reg = r2
 		p.Reg = r1
 		p.To.Type = obj.TYPE_REG
-		p.To.Reg = ppc64.REGTMP // result is not needed
+		p.To.Reg = v.Reg0()
 
 	case ssa.OpPPC64ROTLconst, ssa.OpPPC64ROTLWconst:
 		p := s.Prog(v.Op.Asm())
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
index 7e18def938..79e633e3e4 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
@@ -404,9 +404,9 @@
 // Elide compares of bit tests
 ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 (ANDCCconst [c] x)) yes no)
 ((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 (ANDCCconst [c] x)) yes no)
-((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ANDCC x y) yes no)
-((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ORCC x y) yes no)
-((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 (ANDCC x y)) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 (ORCC x y)) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 (XORCC x y)) yes no)
 
 // Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
 (CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (CMPWconst [0] bool))
@@ -907,11 +907,24 @@
 
 (ISEL [4] x _ (Flag(EQ|GT))) => x
 (ISEL [4] _ y (FlagLT)) => y
-(ISEL [2] x y ((CMP|CMPW)const [0] (Select0 (ANDCCconst [1] z)))) => (ISEL [2] x y (Select1 (ANDCCconst [1] z )))
-(ISEL [6] x y ((CMP|CMPW)const [0] (Select0 (ANDCCconst [1] z)))) => (ISEL [6] x y (Select1 (ANDCCconst [1] z )))
+(ISEL [2] x y ((CMP|CMPW)const [0] (Select0 (ANDCCconst [n] z)))) => (ISEL [2] x y (Select1 (ANDCCconst [n] z )))
+(ISEL [6] x y ((CMP|CMPW)const [0] (Select0 (ANDCCconst [n] z)))) => (ISEL [6] x y (Select1 (ANDCCconst [n] z )))
 
 (ISELB [2] x ((CMP|CMPW)const [0] (Select0 (ANDCCconst [1] z)))) => (XORconst [1] (Select0 (ANDCCconst [1] z )))
 (ISELB [6] x ((CMP|CMPW)const [0] (Select0 (ANDCCconst [1] z)))) => (Select0 (ANDCCconst [1] z ))
+(ISELB [2] x (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) => (ISELB [2] x (Select1 (ANDCCconst [n] z )))
+(ISELB [6] x (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) => (ISELB [6] x (Select1 (ANDCCconst [n] z )))
+
+// Only CMPconst for these in case AND|OR|XOR result is > 32 bits
+(ISELB [2] x (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (ISELB [2] x (Select1 (ANDCC y z )))
+(ISELB [6] x (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (ISELB [6] x (Select1 (ANDCC y z )))
+
+(ISELB [2] x (CMPconst [0] o:(OR y z))) && o.Uses == 1 => (ISELB [2] x (Select1 (ORCC y z )))
+(ISELB [6] x (CMPconst [0] o:(OR y z))) && o.Uses == 1 => (ISELB [6] x (Select1 (ORCC y z )))
+
+(ISELB [2] x (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (ISELB [2] x (Select1 (XORCC y z )))
+(ISELB [6] x (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (ISELB [6] x (Select1 (XORCC y z )))
+
 (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 0 => (ISELB [n+1] (MOVDconst [1]) bool)
 (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 1 => (ISELB [n-1] (MOVDconst [1]) bool)
 (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 2 => (ISELB [n] (MOVDconst [1]) bool)
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
index 3a87c1d2a7..fe129c6467 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
@@ -284,27 +284,27 @@ func init() {
 		{name: "MFVSRD", argLength: 1, reg: fpgp, asm: "MFVSRD", typ: "Int64"},   // move 64 bits of F register into G register
 		{name: "MTVSRD", argLength: 1, reg: gpfp, asm: "MTVSRD", typ: "Float64"}, // move 64 bits of G register into F register
 
-		{name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true},                    // arg0&arg1
-		{name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"},                                     // arg0&^arg1
-		{name: "ANDCC", argLength: 2, reg: gp2cr, asm: "ANDCC", commutative: true, typ: "Flags"}, // arg0&arg1 sets CC
-		{name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true},                      // arg0|arg1
-		{name: "ORN", argLength: 2, reg: gp21, asm: "ORN"},                                       // arg0|^arg1
-		{name: "ORCC", argLength: 2, reg: gp2cr, asm: "ORCC", commutative: true, typ: "Flags"},   // arg0|arg1 sets CC
-		{name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true},                    // ^(arg0|arg1)
-		{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true},      // arg0^arg1
-		{name: "XORCC", argLength: 2, reg: gp2cr, asm: "XORCC", commutative: true, typ: "Flags"}, // arg0^arg1 sets CC
-		{name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true},      // arg0^^arg1
-		{name: "NEG", argLength: 1, reg: gp11, asm: "NEG"},                                       // -arg0 (integer)
-		{name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"},                                     // -arg0 (floating point)
-		{name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"},                                   // sqrt(arg0) (floating point)
-		{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"},                                 // sqrt(arg0) (floating point, single precision)
-		{name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"},                                   // floor(arg0), float64
-		{name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"},                                    // ceil(arg0), float64
-		{name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"},                                   // trunc(arg0), float64
-		{name: "FROUND", argLength: 1, reg: fp11, asm: "FRIN"},                                   // round(arg0), float64
-		{name: "FABS", argLength: 1, reg: fp11, asm: "FABS"},                                     // abs(arg0), float64
-		{name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"},                                   // -abs(arg0), float64
-		{name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"},                                 // copysign arg0 -> arg1, float64
+		{name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true},                                               // arg0&arg1
+		{name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"},                                                                // arg0&^arg1
+		{name: "ANDCC", argLength: 2, reg: gp21, asm: "ANDCC", commutative: true, clobberFlags: true, typ: "(Int64,Flags)"}, // arg0&arg1 sets CC
+		{name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true},                                                 // arg0|arg1
+		{name: "ORN", argLength: 2, reg: gp21, asm: "ORN"},                                                                  // arg0|^arg1
+		{name: "ORCC", argLength: 2, reg: gp21, asm: "ORCC", commutative: true, clobberFlags: true, typ: "(Int,Flags)"},     // arg0|arg1 sets CC
+		{name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true},                                               // ^(arg0|arg1)
+		{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true},                                 // arg0^arg1
+		{name: "XORCC", argLength: 2, reg: gp21, asm: "XORCC", commutative: true, clobberFlags: true, typ: "(Int,Flags)"},   // arg0^arg1 sets CC
+		{name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true},                                 // arg0^^arg1
+		{name: "NEG", argLength: 1, reg: gp11, asm: "NEG"},                                                                  // -arg0 (integer)
+		{name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"},                                                                // -arg0 (floating point)
+		{name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"},                                                              // sqrt(arg0) (floating point)
+		{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"},                                                            // sqrt(arg0) (floating point, single precision)
+		{name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"},                                                              // floor(arg0), float64
+		{name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"},                                                               // ceil(arg0), float64
+		{name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"},                                                              // trunc(arg0), float64
+		{name: "FROUND", argLength: 1, reg: fp11, asm: "FRIN"},                                                              // round(arg0), float64
+		{name: "FABS", argLength: 1, reg: fp11, asm: "FABS"},                                                                // abs(arg0), float64
+		{name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"},                                                              // -abs(arg0), float64
+		{name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"},                                                            // copysign arg0 -> arg1, float64
 
 		{name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"},   // arg0|aux
 		{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 162955675f..dc0f0386b0 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -28842,15 +28842,19 @@ var opcodeTable = [...]opInfo{
 		},
 	},
 	{
-		name:        "ANDCC",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AANDCC,
+		name:         "ANDCC",
+		argLen:       2,
+		commutative:  true,
+		clobberFlags: true,
+		asm:          ppc64.AANDCC,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
 				{1,
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { @@ -28883,15 +28887,19 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORCC", - argLen: 2, - commutative: true, - asm: ppc64.AORCC, + name: "ORCC", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: ppc64.AORCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { @@ -28925,15 +28933,19 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "XORCC", - argLen: 2, - commutative: true, - asm: ppc64.AXORCC, + name: "XORCC", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: ppc64.AXORCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index d3cf9646b7..890e7423e1 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -5573,8 +5573,8 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { v.copyOf(y) return true } - // match: (ISEL [2] x y (CMPconst [0] (Select0 (ANDCCconst [1] z)))) - // result: (ISEL [2] x y (Select1 (ANDCCconst [1] z ))) + // match: (ISEL [2] x y (CMPconst [0] (Select0 (ANDCCconst [n] z)))) + // result: (ISEL [2] x y (Select1 (ANDCCconst [n] z ))) for { if auxIntToInt32(v.AuxInt) != 2 { break @@ -5589,22 +5589,23 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { break } v_2_0_0 := v_2_0.Args[0] - if v_2_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0_0.AuxInt) != 1 { + if v_2_0_0.Op != OpPPC64ANDCCconst { break } + n := auxIntToInt64(v_2_0_0.AuxInt) z := v_2_0_0.Args[0] v.reset(OpPPC64ISEL) v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(1) + v1.AuxInt = int64ToAuxInt(n) v1.AddArg(z) v0.AddArg(v1) v.AddArg3(x, y, v0) return true } - // match: (ISEL [2] x y (CMPWconst [0] (Select0 (ANDCCconst [1] z)))) - // result: (ISEL [2] x y (Select1 (ANDCCconst [1] z ))) + // match: (ISEL [2] x y (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) + // result: (ISEL [2] x y (Select1 (ANDCCconst [n] z ))) for { if auxIntToInt32(v.AuxInt) != 2 { break @@ -5619,22 +5620,23 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { break } v_2_0_0 := v_2_0.Args[0] - if v_2_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0_0.AuxInt) != 1 { + if v_2_0_0.Op != OpPPC64ANDCCconst { break } + n := auxIntToInt64(v_2_0_0.AuxInt) z := v_2_0_0.Args[0] v.reset(OpPPC64ISEL) v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpSelect1, 
types.TypeFlags) v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(1) + v1.AuxInt = int64ToAuxInt(n) v1.AddArg(z) v0.AddArg(v1) v.AddArg3(x, y, v0) return true } - // match: (ISEL [6] x y (CMPconst [0] (Select0 (ANDCCconst [1] z)))) - // result: (ISEL [6] x y (Select1 (ANDCCconst [1] z ))) + // match: (ISEL [6] x y (CMPconst [0] (Select0 (ANDCCconst [n] z)))) + // result: (ISEL [6] x y (Select1 (ANDCCconst [n] z ))) for { if auxIntToInt32(v.AuxInt) != 6 { break @@ -5649,22 +5651,23 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { break } v_2_0_0 := v_2_0.Args[0] - if v_2_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0_0.AuxInt) != 1 { + if v_2_0_0.Op != OpPPC64ANDCCconst { break } + n := auxIntToInt64(v_2_0_0.AuxInt) z := v_2_0_0.Args[0] v.reset(OpPPC64ISEL) v.AuxInt = int32ToAuxInt(6) v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(1) + v1.AuxInt = int64ToAuxInt(n) v1.AddArg(z) v0.AddArg(v1) v.AddArg3(x, y, v0) return true } - // match: (ISEL [6] x y (CMPWconst [0] (Select0 (ANDCCconst [1] z)))) - // result: (ISEL [6] x y (Select1 (ANDCCconst [1] z ))) + // match: (ISEL [6] x y (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) + // result: (ISEL [6] x y (Select1 (ANDCCconst [n] z ))) for { if auxIntToInt32(v.AuxInt) != 6 { break @@ -5679,15 +5682,16 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { break } v_2_0_0 := v_2_0.Args[0] - if v_2_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0_0.AuxInt) != 1 { + if v_2_0_0.Op != OpPPC64ANDCCconst { break } + n := auxIntToInt64(v_2_0_0.AuxInt) z := v_2_0_0.Args[0] v.reset(OpPPC64ISEL) v.AuxInt = int32ToAuxInt(6) v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(1) + v1.AuxInt = int64ToAuxInt(n) v1.AddArg(z) v0.AddArg(v1) v.AddArg3(x, y, v0) @@ -6045,6 +6049,240 @@ func rewriteValuePPC64_OpPPC64ISELB(v *Value) bool { v.AddArg(v0) return true } + // match: (ISELB [2] x (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) + // result: (ISELB [2] x (Select1 (ANDCCconst [n] z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + x := v_0 + if v_1.Op != OpPPC64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpSelect0 { + break + } + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpPPC64ANDCCconst { + break + } + n := auxIntToInt64(v_1_0_0.AuxInt) + z := v_1_0_0.Args[0] + v.reset(OpPPC64ISELB) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(n) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } + // match: (ISELB [6] x (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) + // result: (ISELB [6] x (Select1 (ANDCCconst [n] z ))) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + if v_1.Op != OpPPC64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpSelect0 { + break + } + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpPPC64ANDCCconst { + break + } + n := auxIntToInt64(v_1_0_0.AuxInt) + z := v_1_0_0.Args[0] + v.reset(OpPPC64ISELB) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, 
types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(n) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } + // match: (ISELB [2] x (CMPconst [0] a:(AND y z))) + // cond: a.Uses == 1 + // result: (ISELB [2] x (Select1 (ANDCC y z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + x := v_0 + if v_1.Op != OpPPC64CMPconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + a := v_1.Args[0] + if a.Op != OpPPC64AND { + break + } + z := a.Args[1] + y := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpPPC64ISELB) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(y, z) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } + // match: (ISELB [6] x (CMPconst [0] a:(AND y z))) + // cond: a.Uses == 1 + // result: (ISELB [6] x (Select1 (ANDCC y z ))) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + if v_1.Op != OpPPC64CMPconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + a := v_1.Args[0] + if a.Op != OpPPC64AND { + break + } + z := a.Args[1] + y := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpPPC64ISELB) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(y, z) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } + // match: (ISELB [2] x (CMPconst [0] o:(OR y z))) + // cond: o.Uses == 1 + // result: (ISELB [2] x (Select1 (ORCC y z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + x := v_0 + if v_1.Op != OpPPC64CMPconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + o := v_1.Args[0] + if o.Op != OpPPC64OR { + break + } + z := o.Args[1] + y := o.Args[0] + if !(o.Uses == 1) { + break + } + v.reset(OpPPC64ISELB) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(y, z) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } + // match: (ISELB [6] x (CMPconst [0] o:(OR y z))) + // cond: o.Uses == 1 + // result: (ISELB [6] x (Select1 (ORCC y z ))) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + if v_1.Op != OpPPC64CMPconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + o := v_1.Args[0] + if o.Op != OpPPC64OR { + break + } + z := o.Args[1] + y := o.Args[0] + if !(o.Uses == 1) { + break + } + v.reset(OpPPC64ISELB) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(y, z) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } + // match: (ISELB [2] x (CMPconst [0] a:(XOR y z))) + // cond: a.Uses == 1 + // result: (ISELB [2] x (Select1 (XORCC y z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + x := v_0 + if v_1.Op != OpPPC64CMPconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + a := v_1.Args[0] + if a.Op != OpPPC64XOR { + break + } + z := a.Args[1] + y := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpPPC64ISELB) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(y, z) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } + // match: (ISELB [6] x (CMPconst [0] a:(XOR y z))) + // cond: a.Uses == 1 + // result: (ISELB [6] x (Select1 
(XORCC y z ))) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + if v_1.Op != OpPPC64CMPconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + a := v_1.Args[0] + if a.Op != OpPPC64XOR { + break + } + z := a.Args[1] + y := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpPPC64ISELB) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(y, z) + v0.AddArg(v1) + v.AddArg2(x, v0) + return true + } // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) // cond: n%4 == 0 // result: (ISELB [n+1] (MOVDconst [1]) bool) @@ -16273,7 +16511,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (EQ (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 - // result: (EQ (ANDCC x y) yes no) + // result: (EQ (Select1 (ANDCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -16292,8 +16530,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64EQ, v0) return true } @@ -16301,7 +16541,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (EQ (CMPconst [0] z:(OR x y)) yes no) // cond: z.Uses == 1 - // result: (EQ (ORCC x y) yes no) + // result: (EQ (Select1 (ORCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -16320,8 +16560,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64EQ, v0) return true } @@ -16329,7 +16571,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (EQ (CMPconst [0] z:(XOR x y)) yes no) // cond: z.Uses == 1 - // result: (EQ (XORCC x y) yes no) + // result: (EQ (Select1 (XORCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -16348,8 +16590,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64EQ, v0) return true } @@ -16435,7 +16679,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (GE (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 - // result: (GE (ANDCC x y) yes no) + // result: (GE (Select1 (ANDCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -16454,8 +16698,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64GE, v0) return 
true } @@ -16463,7 +16709,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (GE (CMPconst [0] z:(OR x y)) yes no) // cond: z.Uses == 1 - // result: (GE (ORCC x y) yes no) + // result: (GE (Select1 (ORCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -16482,8 +16728,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64GE, v0) return true } @@ -16491,7 +16739,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (GE (CMPconst [0] z:(XOR x y)) yes no) // cond: z.Uses == 1 - // result: (GE (XORCC x y) yes no) + // result: (GE (Select1 (XORCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -16510,8 +16758,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64GE, v0) return true } @@ -16598,7 +16848,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (GT (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 - // result: (GT (ANDCC x y) yes no) + // result: (GT (Select1 (ANDCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -16617,8 +16867,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64GT, v0) return true } @@ -16626,7 +16878,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (GT (CMPconst [0] z:(OR x y)) yes no) // cond: z.Uses == 1 - // result: (GT (ORCC x y) yes no) + // result: (GT (Select1 (ORCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -16645,8 +16897,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64GT, v0) return true } @@ -16654,7 +16908,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (GT (CMPconst [0] z:(XOR x y)) yes no) // cond: z.Uses == 1 - // result: (GT (XORCC x y) yes no) + // result: (GT (Select1 (XORCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -16673,8 +16927,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + 
v0.AddArg(v1) b.resetWithControl(BlockPPC64GT, v0) return true } @@ -16856,7 +17112,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (LE (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 - // result: (LE (ANDCC x y) yes no) + // result: (LE (Select1 (ANDCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -16875,8 +17131,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64LE, v0) return true } @@ -16884,7 +17142,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (LE (CMPconst [0] z:(OR x y)) yes no) // cond: z.Uses == 1 - // result: (LE (ORCC x y) yes no) + // result: (LE (Select1 (ORCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -16903,8 +17161,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64LE, v0) return true } @@ -16912,7 +17172,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (LE (CMPconst [0] z:(XOR x y)) yes no) // cond: z.Uses == 1 - // result: (LE (XORCC x y) yes no) + // result: (LE (Select1 (XORCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -16931,8 +17191,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64LE, v0) return true } @@ -17019,7 +17281,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (LT (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 - // result: (LT (ANDCC x y) yes no) + // result: (LT (Select1 (ANDCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -17038,8 +17300,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64LT, v0) return true } @@ -17047,7 +17311,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (LT (CMPconst [0] z:(OR x y)) yes no) // cond: z.Uses == 1 - // result: (LT (ORCC x y) yes no) + // result: (LT (Select1 (ORCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -17066,8 +17330,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, 
types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64LT, v0) return true } @@ -17075,7 +17341,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (LT (CMPconst [0] z:(XOR x y)) yes no) // cond: z.Uses == 1 - // result: (LT (XORCC x y) yes no) + // result: (LT (Select1 (XORCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -17094,8 +17360,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64LT, v0) return true } @@ -17461,7 +17729,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (NE (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 - // result: (NE (ANDCC x y) yes no) + // result: (NE (Select1 (ANDCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -17480,8 +17748,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64NE, v0) return true } @@ -17489,7 +17759,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (NE (CMPconst [0] z:(OR x y)) yes no) // cond: z.Uses == 1 - // result: (NE (ORCC x y) yes no) + // result: (NE (Select1 (ORCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -17508,8 +17778,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64NE, v0) return true } @@ -17517,7 +17789,7 @@ func rewriteBlockPPC64(b *Block) bool { } // match: (NE (CMPconst [0] z:(XOR x y)) yes no) // cond: z.Uses == 1 - // result: (NE (XORCC x y) yes no) + // result: (NE (Select1 (XORCC x y)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { @@ -17536,8 +17808,10 @@ func rewriteBlockPPC64(b *Block) bool { if !(z.Uses == 1) { continue } - v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags) - v0.AddArg2(x, y) + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) b.resetWithControl(BlockPPC64NE, v0) return true }
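A closing note on the tuple convention the rewrites above rely on: ANDCC, ORCC, and XORCC now
produce a pair (integer result, flags), and the rules pick the piece they need with Select0 or
Select1. The Go function below is only a conceptual analogue of that convention, written for
this note; the name and the bool stand-in for the flags are illustrative, not compiler APIs.

package p

// andCC models what the ANDCC op now exposes to the rewrite rules:
// Select0 corresponds to the integer result, Select1 to the CR0 flags
// that summarize the result compared against zero (only the EQ bit is
// modeled here; the real flags also carry LT/GT).
func andCC(x, y int64) (res int64, eq bool) {
	res = x & y   // Select0: the result, still usable by later code
	eq = res == 0 // Select1 (simplified): the condition consumed by branches and ISEL
	return res, eq
}

Because the flags come for free with the result, a compare of that result against zero adds no
information, which is why the rules above drop the CMPconst [0] / CMPWconst [0].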