// license that can be found in the LICENSE file.
// Lowering arithmetic
-(Add(Ptr|64|32|16|8) ...) -> (ADD ...)
-(Add64F ...) -> (FADD ...)
-(Add32F ...) -> (FADDS ...)
-
-(Sub(Ptr|64|32|16|8) ...) -> (SUB ...)
-(Sub32F ...) -> (FSUBS ...)
-(Sub64F ...) -> (FSUB ...)
-
-(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
-(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
-(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
-(Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
-(Mod64 x y) -> (SUB x (MULLD y (DIVD x y)))
-(Mod64u x y) -> (SUB x (MULLD y (DIVDU x y)))
-(Mod32 x y) -> (SUB x (MULLW y (DIVW x y)))
-(Mod32u x y) -> (SUB x (MULLW y (DIVWU x y)))
-
-// (x + y) / 2 with x>=y -> (x - y) / 2 + y
-(Avg64u <t> x y) -> (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
-
-(Add64carry ...) -> (LoweredAdd64Carry ...)
-(Mul64 ...) -> (MULLD ...)
-(Mul(32|16|8) ...) -> (MULLW ...)
-(Mul64uhilo ...) -> (LoweredMuluhilo ...)
-
-(Div64 ...) -> (DIVD ...)
-(Div64u ...) -> (DIVDU ...)
-(Div32 ...) -> (DIVW ...)
-(Div32u ...) -> (DIVWU ...)
-(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y))
-(Div16u x y) -> (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
-(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y))
-(Div8u x y) -> (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
-
-(Hmul(64|64u|32|32u) ...) -> (MULH(D|DU|W|WU) ...)
-
-(Mul32F ...) -> (FMULS ...)
-(Mul64F ...) -> (FMUL ...)
-
-(Div32F ...) -> (FDIVS ...)
-(Div64F ...) -> (FDIV ...)
-
-// Lowering float <-> int
-(Cvt32to32F x) -> (FCFIDS (MTVSRD (SignExt32to64 x)))
-(Cvt32to64F x) -> (FCFID (MTVSRD (SignExt32to64 x)))
-(Cvt64to32F x) -> (FCFIDS (MTVSRD x))
-(Cvt64to64F x) -> (FCFID (MTVSRD x))
-
-(Cvt32Fto32 x) -> (MFVSRD (FCTIWZ x))
-(Cvt32Fto64 x) -> (MFVSRD (FCTIDZ x))
-(Cvt64Fto32 x) -> (MFVSRD (FCTIWZ x))
-(Cvt64Fto64 x) -> (MFVSRD (FCTIDZ x))
-
-(Cvt32Fto64F ...) -> (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64
-(Cvt64Fto32F ...) -> (FRSP ...)
-
-(CvtBoolToUint8 ...) -> (Copy ...)
-
-(Round(32|64)F ...) -> (LoweredRound(32|64)F ...)
-
-(Sqrt ...) -> (FSQRT ...)
-(Floor ...) -> (FFLOOR ...)
-(Ceil ...) -> (FCEIL ...)
-(Trunc ...) -> (FTRUNC ...)
-(Round ...) -> (FROUND ...)
-(Copysign x y) -> (FCPSGN y x)
-(Abs ...) -> (FABS ...)
-(FMA ...) -> (FMADD ...)
+(Add(Ptr|64|32|16|8) ...) => (ADD ...)
+(Add64F ...) => (FADD ...)
+(Add32F ...) => (FADDS ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
+(Sub32F ...) => (FSUBS ...)
+(Sub64F ...) => (FSUB ...)
+
+(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Mod64 x y) => (SUB x (MULLD y (DIVD x y)))
+(Mod64u x y) => (SUB x (MULLD y (DIVDU x y)))
+(Mod32 x y) => (SUB x (MULLW y (DIVW x y)))
+(Mod32u x y) => (SUB x (MULLW y (DIVWU x y)))
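// The 32- and 64-bit Mod lowerings above rely on the truncated-division
// identity x%y == x - y*(x/y), which Go guarantees for all y != 0
// (including the wrapping MinInt64/-1 case). A minimal Go sketch:
//
//	func mod64(x, y int64) int64 {
//		return x - y*(x/y) // same value as x % y for every y != 0
//	}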
+
+// (x + y) / 2 with x>=y => (x - y) / 2 + y
+(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
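// A hedged sketch of why the Avg64u rewrite is safe: with x >= y the
// difference cannot wrap, so halving it and adding y back gives the true
// average even when x + y itself would overflow:
//
//	func avg64u(x, y uint64) uint64 { // caller guarantees x >= y
//		return (x-y)/2 + y // == (x+y)/2 without the overflow
//	}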
+
+(Add64carry ...) => (LoweredAdd64Carry ...)
+(Mul64 ...) => (MULLD ...)
+(Mul(32|16|8) ...) => (MULLW ...)
+(Mul64uhilo ...) => (LoweredMuluhilo ...)
+
+(Div64 [false] x y) => (DIVD x y)
+(Div64u ...) => (DIVDU ...)
+(Div32 [false] x y) => (DIVW x y)
+(Div32u ...) => (DIVWU ...)
+(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
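// The sub-word divides have no machine instruction of their own, so the
// operands are widened to 32 bits first. A rough Go model of the Div16
// rule (illustrative only):
//
//	func div16(x, y int16) int16 {
//		return int16(int32(x) / int32(y)) // DIVW on sign-extended inputs
//	}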
+
+(Hmul(64|64u|32|32u) ...) => (MULH(D|DU|W|WU) ...)
+
+(Mul32F ...) => (FMULS ...)
+(Mul64F ...) => (FMUL ...)
+
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIV ...)
+
+// Lowering float <-> int
+(Cvt32to32F x) => (FCFIDS (MTVSRD (SignExt32to64 x)))
+(Cvt32to64F x) => (FCFID (MTVSRD (SignExt32to64 x)))
+(Cvt64to32F x) => (FCFIDS (MTVSRD x))
+(Cvt64to64F x) => (FCFID (MTVSRD x))
+
+(Cvt32Fto32 x) => (MFVSRD (FCTIWZ x))
+(Cvt32Fto64 x) => (MFVSRD (FCTIDZ x))
+(Cvt64Fto32 x) => (MFVSRD (FCTIWZ x))
+(Cvt64Fto64 x) => (MFVSRD (FCTIDZ x))
+
+(Cvt32Fto64F ...) => (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64
+(Cvt64Fto32F ...) => (FRSP ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
+
+(Sqrt ...) => (FSQRT ...)
+(Floor ...) => (FFLOOR ...)
+(Ceil ...) => (FCEIL ...)
+(Trunc ...) => (FTRUNC ...)
+(Round ...) => (FROUND ...)
+(Copysign x y) => (FCPSGN y x)
+(Abs ...) => (FABS ...)
+(FMA ...) => (FMADD ...)
// Lowering constants
-(Const(64|32|16|8) ...) -> (MOVDconst ...)
-(Const(32|64)F ...) -> (FMOV(S|D)const ...)
-(ConstNil) -> (MOVDconst [0])
-(ConstBool ...) -> (MOVDconst ...)
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
+(Const(32|64)F ...) => (FMOV(S|D)const ...)
+(ConstNil) => (MOVDconst [0])
+(ConstBool [b]) => (MOVDconst [b2i(b)])
// Constant folding
-(FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
-(FSQRT (FMOVDconst [x])) && auxTo64F(x) >= 0 -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
-(FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
-(FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
-(FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])
+(FABS (FMOVDconst [x])) => (FMOVDconst [math.Abs(x)])
+(FSQRT (FMOVDconst [x])) && x >= 0 => (FMOVDconst [math.Sqrt(x)])
+(FFLOOR (FMOVDconst [x])) => (FMOVDconst [math.Floor(x)])
+(FCEIL (FMOVDconst [x])) => (FMOVDconst [math.Ceil(x)])
+(FTRUNC (FMOVDconst [x])) => (FMOVDconst [math.Trunc(x)])
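// The x >= 0 guard on the FSQRT fold keeps a negative constant from
// being folded into a NaN at compile time (a reading of the guard's
// intent; the condition is exactly what the rule encodes):
//
//	func foldSqrt(x float64) (float64, bool) {
//		if x >= 0 {
//			return math.Sqrt(x), true // safe to constant-fold
//		}
//		return 0, false // leave for the runtime FSQRT instruction
//	}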
// Rotates
-(RotateLeft8 <t> x (MOVDconst [c])) -> (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
-(RotateLeft16 <t> x (MOVDconst [c])) -> (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
-(RotateLeft32 x (MOVDconst [c])) -> (ROTLWconst [c&31] x)
-(RotateLeft64 x (MOVDconst [c])) -> (ROTLconst [c&63] x)
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft32 x (MOVDconst [c])) => (ROTLWconst [c&31] x)
+(RotateLeft64 x (MOVDconst [c])) => (ROTLconst [c&63] x)
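// PPC64 has no 8- or 16-bit rotate instruction, so small rotates are
// assembled from two shifts. The masking in the rules mirrors this Go
// identity (a sketch, not part of the file):
//
//	func rotl8(x uint8, c int64) uint8 {
//		return x<<(c&7) | x>>(-c&7) // OR of the two wrapped pieces
//	}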
// Rotate generation with const shift
-(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
-( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
-(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
+(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
+( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
+(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
-(ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
-( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
-(XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
+(ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
+( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
+(XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
// Rotate generation with non-const shift
// These match patterns from math/bits.RotateLeft[32|64], but there could be others.
-(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
-( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
-(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
+(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
-(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
-( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
-(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
+(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
// Lowering rotates
-(RotateLeft32 x y) -> (ROTLW x y)
-(RotateLeft64 x y) -> (ROTL x y)
+(RotateLeft32 x y) => (ROTLW x y)
+(RotateLeft64 x y) => (ROTL x y)
// Constant rotate generation
-(ROTLW x (MOVDconst [c])) -> (ROTLWconst x [c&31])
-(ROTL x (MOVDconst [c])) -> (ROTLconst x [c&63])
+(ROTLW x (MOVDconst [c])) => (ROTLWconst x [c&31])
+(ROTL x (MOVDconst [c])) => (ROTLconst x [c&63])
// large constant shifts
-(Lsh64x64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0])
-(Rsh64Ux64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0])
-(Lsh32x64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0])
-(Rsh32Ux64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0])
-(Lsh16x64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0])
-(Rsh16Ux64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0])
-(Lsh8x64 _ (MOVDconst [c])) && uint64(c) >= 8 -> (MOVDconst [0])
-(Rsh8Ux64 _ (MOVDconst [c])) && uint64(c) >= 8 -> (MOVDconst [0])
+(Lsh64x64 _ (MOVDconst [c])) && uint64(c) >= 64 => (MOVDconst [0])
+(Rsh64Ux64 _ (MOVDconst [c])) && uint64(c) >= 64 => (MOVDconst [0])
+(Lsh32x64 _ (MOVDconst [c])) && uint64(c) >= 32 => (MOVDconst [0])
+(Rsh32Ux64 _ (MOVDconst [c])) && uint64(c) >= 32 => (MOVDconst [0])
+(Lsh16x64 _ (MOVDconst [c])) && uint64(c) >= 16 => (MOVDconst [0])
+(Rsh16Ux64 _ (MOVDconst [c])) && uint64(c) >= 16 => (MOVDconst [0])
+(Lsh8x64 _ (MOVDconst [c])) && uint64(c) >= 8 => (MOVDconst [0])
+(Rsh8Ux64 _ (MOVDconst [c])) && uint64(c) >= 8 => (MOVDconst [0])
// large constant signed right shift: only the sign bit remains
-(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 -> (SRADconst x [63])
-(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
-(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
-(Rsh8x64 x (MOVDconst [c])) && uint64(c) >= 8 -> (SRAWconst (SignExt8to32 x) [63])
+(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 => (SRADconst x [63])
+(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 => (SRAWconst x [63])
+(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 => (SRAWconst (SignExt16to32 x) [63])
+(Rsh8x64 x (MOVDconst [c])) && uint64(c) >= 8 => (SRAWconst (SignExt8to32 x) [63])
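// Together these encode Go's shift semantics: a count of at least the
// operand width yields 0 for left and unsigned right shifts, and all
// copies of the sign bit for signed right shifts. For example:
//
//	var x int32 = -8
//	_ = x >> 40 // == -1: every result bit is the sign bit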
// constant shifts
-(Lsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SLDconst x [c])
-(Rsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRADconst x [c])
-(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRDconst x [c])
-(Lsh32x64 x (MOVDconst [c])) && uint64(c) < 32 -> (SLWconst x [c])
-(Rsh32x64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRAWconst x [c])
-(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRWconst x [c])
-(Lsh16x64 x (MOVDconst [c])) && uint64(c) < 16 -> (SLWconst x [c])
-(Rsh16x64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
-(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
-(Lsh8x64 x (MOVDconst [c])) && uint64(c) < 8 -> (SLWconst x [c])
-(Rsh8x64 x (MOVDconst [c])) && uint64(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
-(Rsh8Ux64 x (MOVDconst [c])) && uint64(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])
-
-(Lsh64x32 x (MOVDconst [c])) && uint32(c) < 64 -> (SLDconst x [c])
-(Rsh64x32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRADconst x [c])
-(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRDconst x [c])
-(Lsh32x32 x (MOVDconst [c])) && uint32(c) < 32 -> (SLWconst x [c])
-(Rsh32x32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRAWconst x [c])
-(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRWconst x [c])
-(Lsh16x32 x (MOVDconst [c])) && uint32(c) < 16 -> (SLWconst x [c])
-(Rsh16x32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
-(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
-(Lsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SLWconst x [c])
-(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
-(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])
+(Lsh64x64 x (MOVDconst [c])) && uint64(c) < 64 => (SLDconst x [c])
+(Rsh64x64 x (MOVDconst [c])) && uint64(c) < 64 => (SRADconst x [c])
+(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 => (SRDconst x [c])
+(Lsh32x64 x (MOVDconst [c])) && uint64(c) < 32 => (SLWconst x [c])
+(Rsh32x64 x (MOVDconst [c])) && uint64(c) < 32 => (SRAWconst x [c])
+(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 => (SRWconst x [c])
+(Lsh16x64 x (MOVDconst [c])) && uint64(c) < 16 => (SLWconst x [c])
+(Rsh16x64 x (MOVDconst [c])) && uint64(c) < 16 => (SRAWconst (SignExt16to32 x) [c])
+(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 => (SRWconst (ZeroExt16to32 x) [c])
+(Lsh8x64 x (MOVDconst [c])) && uint64(c) < 8 => (SLWconst x [c])
+(Rsh8x64 x (MOVDconst [c])) && uint64(c) < 8 => (SRAWconst (SignExt8to32 x) [c])
+(Rsh8Ux64 x (MOVDconst [c])) && uint64(c) < 8 => (SRWconst (ZeroExt8to32 x) [c])
+
+(Lsh64x32 x (MOVDconst [c])) && uint32(c) < 64 => (SLDconst x [c])
+(Rsh64x32 x (MOVDconst [c])) && uint32(c) < 64 => (SRADconst x [c])
+(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 => (SRDconst x [c])
+(Lsh32x32 x (MOVDconst [c])) && uint32(c) < 32 => (SLWconst x [c])
+(Rsh32x32 x (MOVDconst [c])) && uint32(c) < 32 => (SRAWconst x [c])
+(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 => (SRWconst x [c])
+(Lsh16x32 x (MOVDconst [c])) && uint32(c) < 16 => (SLWconst x [c])
+(Rsh16x32 x (MOVDconst [c])) && uint32(c) < 16 => (SRAWconst (SignExt16to32 x) [c])
+(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 => (SRWconst (ZeroExt16to32 x) [c])
+(Lsh8x32 x (MOVDconst [c])) && uint32(c) < 8 => (SLWconst x [c])
+(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 => (SRAWconst (SignExt8to32 x) [c])
+(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 => (SRWconst (ZeroExt8to32 x) [c])
// Lower bounded shifts first. No need to check shift value.
-(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLD x y)
-(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
-(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
-(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
-(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRD x y)
-(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW x y)
-(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW (MOVHZreg x) y)
-(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW (MOVBZreg x) y)
-(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAD x y)
-(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW x y)
-(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW (MOVHreg x) y)
-(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW (MOVBreg x) y)
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVHZreg x) y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVBZreg x) y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVHreg x) y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y)
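// shiftIsBounded(v) holds when the compiler already knows the count is
// less than the operand width, for example because the source masked it,
// so the clamping below is unnecessary. A shape that typically qualifies
// (illustrative):
//
//	func shl(x uint64, n uint) uint64 {
//		return x << (n & 63) // provably < 64: a bare SLD suffices
//	}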
// non-constant rotates
// These are subexpressions found in statements that can become rotates.
// In these cases the shift count is known to be < 64, so the more
// complicated expressions with Mask & Carry are not needed.
-(Lsh64x64 x (AND y (MOVDconst [63]))) -> (SLD x (ANDconst <typ.Int64> [63] y))
-(Lsh64x64 x (ANDconst <typ.Int64> [63] y)) -> (SLD x (ANDconst <typ.Int64> [63] y))
-(Rsh64Ux64 x (AND y (MOVDconst [63]))) -> (SRD x (ANDconst <typ.Int64> [63] y))
-(Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) -> (SRD x (ANDconst <typ.UInt> [63] y))
-(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
-(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
-(Rsh64x64 x (AND y (MOVDconst [63]))) -> (SRAD x (ANDconst <typ.Int64> [63] y))
-(Rsh64x64 x (ANDconst <typ.UInt> [63] y)) -> (SRAD x (ANDconst <typ.UInt> [63] y))
-(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
-(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
-
-(Lsh64x64 x y) -> (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
-(Rsh64x64 x y) -> (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
-(Rsh64Ux64 x y) -> (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
-
-(Lsh32x64 x (AND y (MOVDconst [31]))) -> (SLW x (ANDconst <typ.Int32> [31] y))
-(Lsh32x64 x (ANDconst <typ.Int32> [31] y)) -> (SLW x (ANDconst <typ.Int32> [31] y))
-
-(Rsh32Ux64 x (AND y (MOVDconst [31]))) -> (SRW x (ANDconst <typ.Int32> [31] y))
-(Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) -> (SRW x (ANDconst <typ.UInt> [31] y))
-(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
-(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
-
-(Rsh32x64 x (AND y (MOVDconst [31]))) -> (SRAW x (ANDconst <typ.Int32> [31] y))
-(Rsh32x64 x (ANDconst <typ.UInt> [31] y)) -> (SRAW x (ANDconst <typ.UInt> [31] y))
-(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
-(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
-
-(Rsh32x64 x y) -> (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
-(Rsh32Ux64 x y) -> (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
-(Lsh32x64 x y) -> (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
-
-(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
-(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
-(Lsh16x64 x y) -> (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
-
-(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
-(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
-(Lsh8x64 x y) -> (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
-
-(Rsh64x32 x y) -> (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
-(Rsh64Ux32 x y) -> (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
-(Lsh64x32 x y) -> (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
-(Rsh32x32 x y) -> (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
-(Rsh32Ux32 x y) -> (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
-(Lsh32x32 x y) -> (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
-
-(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
-(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
-(Lsh16x32 x y) -> (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
-
-(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
-(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
-(Lsh8x32 x y) -> (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
-
-
-(Rsh64x16 x y) -> (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
-(Rsh64Ux16 x y) -> (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
-(Lsh64x16 x y) -> (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
-
-(Rsh32x16 x y) -> (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
-(Rsh32Ux16 x y) -> (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
-(Lsh32x16 x y) -> (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
-
-(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
-(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
-(Lsh16x16 x y) -> (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
-
-(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
-(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
-(Lsh8x16 x y) -> (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
-
-
-(Rsh64x8 x y) -> (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
-(Rsh64Ux8 x y) -> (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
-(Lsh64x8 x y) -> (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
-
-(Rsh32x8 x y) -> (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
-(Rsh32Ux8 x y) -> (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
-(Lsh32x8 x y) -> (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
-
-(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
-(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
-(Lsh16x8 x y) -> (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
-
-(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
-(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
-(Lsh8x8 x y) -> (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+(Lsh64x64 x (AND y (MOVDconst [63]))) => (SLD x (ANDconst <typ.Int64> [63] y))
+(Lsh64x64 x (ANDconst <typ.Int64> [63] y)) => (SLD x (ANDconst <typ.Int64> [63] y))
+(Rsh64Ux64 x (AND y (MOVDconst [63]))) => (SRD x (ANDconst <typ.Int64> [63] y))
+(Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) => (SRD x (ANDconst <typ.UInt> [63] y))
+(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+(Rsh64x64 x (AND y (MOVDconst [63]))) => (SRAD x (ANDconst <typ.Int64> [63] y))
+(Rsh64x64 x (ANDconst <typ.UInt> [63] y)) => (SRAD x (ANDconst <typ.UInt> [63] y))
+(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+
+(Lsh64x64 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Rsh64x64 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Rsh64Ux64 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
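// The ISEL here clamps an out-of-range count to -1, whose low 7 bits are
// 127; PPC64's SLD/SRD produce 0 (and SRAD all sign bits) for counts of
// 64..127, which is exactly Go's oversized-shift behavior. A rough Go
// model of the left-shift case:
//
//	func shl64(x uint64, y int64) uint64 {
//		count := y
//		if uint64(y) >= 64 { // also catches negative y
//			count = -1 // hardware then shifts everything out
//		}
//		return x << uint(count&127) // models the 7-bit shift field
//	}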
+
+(Lsh32x64 x (AND y (MOVDconst [31]))) => (SLW x (ANDconst <typ.Int32> [31] y))
+(Lsh32x64 x (ANDconst <typ.Int32> [31] y)) => (SLW x (ANDconst <typ.Int32> [31] y))
+
+(Rsh32Ux64 x (AND y (MOVDconst [31]))) => (SRW x (ANDconst <typ.Int32> [31] y))
+(Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) => (SRW x (ANDconst <typ.UInt> [31] y))
+(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+
+(Rsh32x64 x (AND y (MOVDconst [31]))) => (SRAW x (ANDconst <typ.Int32> [31] y))
+(Rsh32x64 x (ANDconst <typ.UInt> [31] y)) => (SRAW x (ANDconst <typ.UInt> [31] y))
+(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+
+(Rsh32x64 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+(Rsh32Ux64 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+(Lsh32x64 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+
+(Rsh16x64 x y) => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+(Rsh16Ux64 x y) => (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+(Lsh16x64 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+
+(Rsh8x64 x y) => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+(Rsh8Ux64 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+(Lsh8x64 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+
+(Rsh64x32 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Rsh64Ux32 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Lsh64x32 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Rsh32x32 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+(Rsh32Ux32 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+(Lsh32x32 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+
+(Rsh16x32 x y) => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+(Rsh16Ux32 x y) => (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+(Lsh16x32 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+
+(Rsh8x32 x y) => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+(Rsh8Ux32 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+(Lsh8x32 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+
+
+(Rsh64x16 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+(Rsh64Ux16 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+(Lsh64x16 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+
+(Rsh32x16 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+(Rsh32Ux16 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+(Lsh32x16 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+
+(Rsh16x16 x y) => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+(Rsh16Ux16 x y) => (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+(Lsh16x16 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+
+(Rsh8x16 x y) => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+(Rsh8Ux16 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+(Lsh8x16 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+
+
+(Rsh64x8 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+(Rsh64Ux8 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+(Lsh64x8 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+
+(Rsh32x8 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+(Rsh32Ux8 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+(Lsh32x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+
+(Rsh16x8 x y) => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+(Rsh16Ux8 x y) => (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+(Lsh16x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+
+(Rsh8x8 x y) => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+(Rsh8Ux8 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+(Lsh8x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
// Cleaning up shift ops when input is masked
-(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
-(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c]))) && c >= d -> (ANDconst [d] y)
-(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y))) && c >= d -> (ANDconst [d] y)
-(ORN x (MOVDconst [-1])) -> x
+(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && int64(c) + d < 0 => (MOVDconst [-1])
+(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c]))) && c >= d => (ANDconst [d] y)
+(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y))) && c >= d => (ANDconst [d] y)
+(ORN x (MOVDconst [-1])) => x
-(ADDconstForCarry [c] (MOVDconst [d])) && int64(int16(c)) < 0 && (int64(c) < 0 || int64(c) + d >= 0) -> (FlagCarryClear)
-(ADDconstForCarry [c] (MOVDconst [d])) && int64(int16(c)) < 0 && int64(c) >= 0 && int64(c) + d < 0 -> (FlagCarrySet)
+(ADDconstForCarry [c] (MOVDconst [d])) && c < 0 && (int64(c) < 0 || int64(c) + d >= 0) => (FlagCarryClear)
+(ADDconstForCarry [c] (MOVDconst [d])) && c < 0 && int64(c) >= 0 && int64(c) + d < 0 => (FlagCarrySet)
-(MaskIfNotCarry (FlagCarrySet)) -> (MOVDconst [0])
-(MaskIfNotCarry (FlagCarryClear)) -> (MOVDconst [-1])
+(MaskIfNotCarry (FlagCarrySet)) => (MOVDconst [0])
+(MaskIfNotCarry (FlagCarryClear)) => (MOVDconst [-1])
-(S(RAD|RAW|RD|RW|LD|LW) x (MOVDconst [c])) -> (S(RAD|RAW|RD|RW|LD|LW)const [c] x)
+(S(RAD|RAW|RD|RW|LD|LW) x (MOVDconst [c])) => (S(RAD|RAW|RD|RW|LD|LW)const [c] x)
-(Addr ...) -> (MOVDaddr ...)
-(LocalAddr {sym} base _) -> (MOVDaddr {sym} base)
-(OffPtr [off] ptr) -> (ADD (MOVDconst <typ.Int64> [off]) ptr)
+(Addr {sym} base) => (MOVDaddr {sym} [0] base)
+(LocalAddr {sym} base _) => (MOVDaddr {sym} base)
+(OffPtr [off] ptr) => (ADD (MOVDconst <typ.Int64> [off]) ptr)
// TODO: optimize these cases?
-(Ctz32NonZero ...) -> (Ctz32 ...)
-(Ctz64NonZero ...) -> (Ctz64 ...)
+(Ctz32NonZero ...) => (Ctz32 ...)
+(Ctz64NonZero ...) => (Ctz64 ...)
-(Ctz64 x) && objabi.GOPPC64<=8 -> (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
-(Ctz64 x) -> (CNTTZD x)
-(Ctz32 x) && objabi.GOPPC64<=8 -> (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
-(Ctz32 x) -> (CNTTZW (MOVWZreg x))
-(Ctz16 x) -> (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
-(Ctz8 x) -> (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
+(Ctz64 x) && objabi.GOPPC64<=8 => (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
+(Ctz64 x) => (CNTTZD x)
+(Ctz32 x) && objabi.GOPPC64<=8 => (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
+(Ctz32 x) => (CNTTZW (MOVWZreg x))
+(Ctz16 x) => (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
+(Ctz8 x) => (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
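// Pre-POWER9 targets lack count-trailing-zeros, so these rules use the
// identity ctz(x) = popcount((x-1) &^ x): subtracting 1 turns exactly
// the trailing zeros into ones and clears the lowest set bit. In Go,
// checkable against math/bits:
//
//	func ctz64(x uint64) int {
//		return bits.OnesCount64((x - 1) &^ x) // == bits.TrailingZeros64(x)
//	}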
-(BitLen64 x) -> (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x))
-(BitLen32 x) -> (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x))
+(BitLen64 x) => (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x))
+(BitLen32 x) => (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x))
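// BitLen is derived from count-leading-zeros, matching the math/bits
// definition:
//
//	bits.Len64(x) == 64 - bits.LeadingZeros64(x) // the SUB/CNTLZD pair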
-(PopCount64 ...) -> (POPCNTD ...)
-(PopCount32 x) -> (POPCNTW (MOVWZreg x))
-(PopCount16 x) -> (POPCNTW (MOVHZreg x))
-(PopCount8 x) -> (POPCNTB (MOVBZreg x))
+(PopCount64 ...) => (POPCNTD ...)
+(PopCount32 x) => (POPCNTW (MOVWZreg x))
+(PopCount16 x) => (POPCNTW (MOVHZreg x))
+(PopCount8 x) => (POPCNTB (MOVBZreg x))
-(And(64|32|16|8) ...) -> (AND ...)
-(Or(64|32|16|8) ...) -> (OR ...)
-(Xor(64|32|16|8) ...) -> (XOR ...)
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
-(Neg(64|32|16|8) ...) -> (NEG ...)
-(Neg64F ...) -> (FNEG ...)
-(Neg32F ...) -> (FNEG ...)
+(Neg(64|32|16|8) ...) => (NEG ...)
+(Neg64F ...) => (FNEG ...)
+(Neg32F ...) => (FNEG ...)
-(Com(64|32|16|8) x) -> (NOR x x)
+(Com(64|32|16|8) x) => (NOR x x)
// Lowering boolean ops
-(AndB ...) -> (AND ...)
-(OrB ...) -> (OR ...)
-(Not x) -> (XORconst [1] x)
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(Not x) => (XORconst [1] x)
// Use ANDN for AND x NOT y
-(AND x (NOR y y)) -> (ANDN x y)
+(AND x (NOR y y)) => (ANDN x y)
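// NOR y y computes ^y, so the pair collapses into one and-with-complement
// instruction; in Go terms this is the builtin and-not operator:
//
//	func andn(x, y uint64) uint64 {
//		return x &^ y // single ANDN instead of NOR then AND
//	}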
// Lowering comparisons
-(EqB x y) -> (ANDconst [1] (EQV x y))
+(EqB x y) => (ANDconst [1] (EQV x y))
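// EQV is bitwise xnor, so for 0/1 boolean operands the low bit of the
// result is 1 exactly when they are equal:
//
//	eq := ^(x ^ y) & 1 // 1 when x == y, 0 otherwise (x, y in {0, 1})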
// Sign extension dependence on operand sign sets up for sign/zero-extension elision later
-(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-(Eq8 x y) -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Eq32 x y) -> (Equal (CMPW x y))
-(Eq64 x y) -> (Equal (CMP x y))
-(Eq32F x y) -> (Equal (FCMPU x y))
-(Eq64F x y) -> (Equal (FCMPU x y))
-(EqPtr x y) -> (Equal (CMP x y))
-
-(NeqB ...) -> (XOR ...)
+(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) => (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) => (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Eq8 x y) => (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (Equal (CMPW x y))
+(Eq64 x y) => (Equal (CMP x y))
+(Eq32F x y) => (Equal (FCMPU x y))
+(Eq64F x y) => (Equal (FCMPU x y))
+(EqPtr x y) => (Equal (CMP x y))
+
+(NeqB ...) => (XOR ...)
// Like Eq8 and Eq16, prefer sign extension when it is likely to enable later elision.
-(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-(Neq8 x y) -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Neq32 x y) -> (NotEqual (CMPW x y))
-(Neq64 x y) -> (NotEqual (CMP x y))
-(Neq32F x y) -> (NotEqual (FCMPU x y))
-(Neq64F x y) -> (NotEqual (FCMPU x y))
-(NeqPtr x y) -> (NotEqual (CMP x y))
-
-(Less8 x y) -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-(Less32 x y) -> (LessThan (CMPW x y))
-(Less64 x y) -> (LessThan (CMP x y))
-(Less32F x y) -> (FLessThan (FCMPU x y))
-(Less64F x y) -> (FLessThan (FCMPU x y))
-
-(Less8U x y) -> (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Less16U x y) -> (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Less32U x y) -> (LessThan (CMPWU x y))
-(Less64U x y) -> (LessThan (CMPU x y))
-
-(Leq8 x y) -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-(Leq32 x y) -> (LessEqual (CMPW x y))
-(Leq64 x y) -> (LessEqual (CMP x y))
-(Leq32F x y) -> (FLessEqual (FCMPU x y))
-(Leq64F x y) -> (FLessEqual (FCMPU x y))
-
-(Leq8U x y) -> (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Leq16U x y) -> (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Leq32U x y) -> (LessEqual (CMPWU x y))
-(Leq64U x y) -> (LessEqual (CMPU x y))
+(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) => (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) => (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Neq8 x y) => (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) => (NotEqual (CMPW x y))
+(Neq64 x y) => (NotEqual (CMP x y))
+(Neq32F x y) => (NotEqual (FCMPU x y))
+(Neq64F x y) => (NotEqual (FCMPU x y))
+(NeqPtr x y) => (NotEqual (CMP x y))
+
+(Less8 x y) => (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) => (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) => (LessThan (CMPW x y))
+(Less64 x y) => (LessThan (CMP x y))
+(Less32F x y) => (FLessThan (FCMPU x y))
+(Less64F x y) => (FLessThan (FCMPU x y))
+
+(Less8U x y) => (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) => (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) => (LessThan (CMPWU x y))
+(Less64U x y) => (LessThan (CMPU x y))
+
+(Leq8 x y) => (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (LessEqual (CMPW x y))
+(Leq64 x y) => (LessEqual (CMP x y))
+(Leq32F x y) => (FLessEqual (FCMPU x y))
+(Leq64F x y) => (FLessEqual (FCMPU x y))
+
+(Leq8U x y) => (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (LessEqual (CMPWU x y))
+(Leq64U x y) => (LessEqual (CMPU x y))
// Absorb pseudo-ops into blocks.
-(If (Equal cc) yes no) -> (EQ cc yes no)
-(If (NotEqual cc) yes no) -> (NE cc yes no)
-(If (LessThan cc) yes no) -> (LT cc yes no)
-(If (LessEqual cc) yes no) -> (LE cc yes no)
-(If (GreaterThan cc) yes no) -> (GT cc yes no)
-(If (GreaterEqual cc) yes no) -> (GE cc yes no)
-(If (FLessThan cc) yes no) -> (FLT cc yes no)
-(If (FLessEqual cc) yes no) -> (FLE cc yes no)
-(If (FGreaterThan cc) yes no) -> (FGT cc yes no)
-(If (FGreaterEqual cc) yes no) -> (FGE cc yes no)
-
-(If cond yes no) -> (NE (CMPWconst [0] cond) yes no)
+(If (Equal cc) yes no) => (EQ cc yes no)
+(If (NotEqual cc) yes no) => (NE cc yes no)
+(If (LessThan cc) yes no) => (LT cc yes no)
+(If (LessEqual cc) yes no) => (LE cc yes no)
+(If (GreaterThan cc) yes no) => (GT cc yes no)
+(If (GreaterEqual cc) yes no) => (GE cc yes no)
+(If (FLessThan cc) yes no) => (FLT cc yes no)
+(If (FLessEqual cc) yes no) => (FLE cc yes no)
+(If (FGreaterThan cc) yes no) => (FGT cc yes no)
+(If (FGreaterEqual cc) yes no) => (FGE cc yes no)
+
+(If cond yes no) => (NE (CMPWconst [0] cond) yes no)
// Absorb boolean tests into block
-(NE (CMPWconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
-(NE (CMPWconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
-(NE (CMPWconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
-(NE (CMPWconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
-(NE (CMPWconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
-(NE (CMPWconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
-(NE (CMPWconst [0] (FLessThan cc)) yes no) -> (FLT cc yes no)
-(NE (CMPWconst [0] (FLessEqual cc)) yes no) -> (FLE cc yes no)
-(NE (CMPWconst [0] (FGreaterThan cc)) yes no) -> (FGT cc yes no)
-(NE (CMPWconst [0] (FGreaterEqual cc)) yes no) -> (FGE cc yes no)
+(NE (CMPWconst [0] (Equal cc)) yes no) => (EQ cc yes no)
+(NE (CMPWconst [0] (NotEqual cc)) yes no) => (NE cc yes no)
+(NE (CMPWconst [0] (LessThan cc)) yes no) => (LT cc yes no)
+(NE (CMPWconst [0] (LessEqual cc)) yes no) => (LE cc yes no)
+(NE (CMPWconst [0] (GreaterThan cc)) yes no) => (GT cc yes no)
+(NE (CMPWconst [0] (GreaterEqual cc)) yes no) => (GE cc yes no)
+(NE (CMPWconst [0] (FLessThan cc)) yes no) => (FLT cc yes no)
+(NE (CMPWconst [0] (FLessEqual cc)) yes no) => (FLE cc yes no)
+(NE (CMPWconst [0] (FGreaterThan cc)) yes no) => (FGT cc yes no)
+(NE (CMPWconst [0] (FGreaterEqual cc)) yes no) => (FGE cc yes no)
// Elide compares of bit tests.
// TODO: need to make both CC and result of ANDCC available.
-(EQ (CMPconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
-(NE (CMPconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
-(EQ (CMPWconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
-(NE (CMPWconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
+(EQ (CMPconst [0] (ANDconst [c] x)) yes no) => (EQ (ANDCCconst [c] x) yes no)
+(NE (CMPconst [0] (ANDconst [c] x)) yes no) => (NE (ANDCCconst [c] x) yes no)
+(EQ (CMPWconst [0] (ANDconst [c] x)) yes no) => (EQ (ANDCCconst [c] x) yes no)
+(NE (CMPWconst [0] (ANDconst [c] x)) yes no) => (NE (ANDCCconst [c] x) yes no)
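// These let a bit test set the condition register directly: the AND
// already records whether its result was zero, so the separate compare
// against 0 is dropped. Source shapes like this one benefit
// (illustrative):
//
//	if x&0x8 == 0 { // the ANDCCconst sets EQ; no CMPconst is emitted
//		x++
//	}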
// absorb flag constants into branches
-(EQ (FlagEQ) yes no) -> (First yes no)
-(EQ (FlagLT) yes no) -> (First no yes)
-(EQ (FlagGT) yes no) -> (First no yes)
+(EQ (FlagEQ) yes no) => (First yes no)
+(EQ (FlagLT) yes no) => (First no yes)
+(EQ (FlagGT) yes no) => (First no yes)
-(NE (FlagEQ) yes no) -> (First no yes)
-(NE (FlagLT) yes no) -> (First yes no)
-(NE (FlagGT) yes no) -> (First yes no)
+(NE (FlagEQ) yes no) => (First no yes)
+(NE (FlagLT) yes no) => (First yes no)
+(NE (FlagGT) yes no) => (First yes no)
-(LT (FlagEQ) yes no) -> (First no yes)
-(LT (FlagLT) yes no) -> (First yes no)
-(LT (FlagGT) yes no) -> (First no yes)
+(LT (FlagEQ) yes no) => (First no yes)
+(LT (FlagLT) yes no) => (First yes no)
+(LT (FlagGT) yes no) => (First no yes)
-(LE (FlagEQ) yes no) -> (First yes no)
-(LE (FlagLT) yes no) -> (First yes no)
-(LE (FlagGT) yes no) -> (First no yes)
+(LE (FlagEQ) yes no) => (First yes no)
+(LE (FlagLT) yes no) => (First yes no)
+(LE (FlagGT) yes no) => (First no yes)
-(GT (FlagEQ) yes no) -> (First no yes)
-(GT (FlagLT) yes no) -> (First no yes)
-(GT (FlagGT) yes no) -> (First yes no)
+(GT (FlagEQ) yes no) => (First no yes)
+(GT (FlagLT) yes no) => (First no yes)
+(GT (FlagGT) yes no) => (First yes no)
-(GE (FlagEQ) yes no) -> (First yes no)
-(GE (FlagLT) yes no) -> (First no yes)
-(GE (FlagGT) yes no) -> (First yes no)
+(GE (FlagEQ) yes no) => (First yes no)
+(GE (FlagLT) yes no) => (First no yes)
+(GE (FlagGT) yes no) => (First yes no)
// absorb InvertFlags into branches
-(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
-(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
-(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
-(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
-(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
-(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
// constant comparisons
-(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
-(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) -> (FlagLT)
-(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) -> (FlagGT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) => (FlagLT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) => (FlagGT)
-(CMPconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
-(CMPconst (MOVDconst [x]) [y]) && x<y -> (FlagLT)
-(CMPconst (MOVDconst [x]) [y]) && x>y -> (FlagGT)
+(CMPconst (MOVDconst [x]) [y]) && x==y => (FlagEQ)
+(CMPconst (MOVDconst [x]) [y]) && x<y => (FlagLT)
+(CMPconst (MOVDconst [x]) [y]) && x>y => (FlagGT)
-(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
-(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
-(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)
+(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)
-(CMPUconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
-(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
-(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)
+(CMPUconst (MOVDconst [x]) [y]) && x==y => (FlagEQ)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)
// other known comparisons
-//(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT)
-//(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT)
-//(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
-//(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) -> (FlagLT)
+//(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagLT)
+//(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagLT)
+//(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) => (FlagLT)
+//(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) => (FlagLT)
// absorb flag constants into boolean values
-(Equal (FlagEQ)) -> (MOVDconst [1])
-(Equal (FlagLT)) -> (MOVDconst [0])
-(Equal (FlagGT)) -> (MOVDconst [0])
+(Equal (FlagEQ)) => (MOVDconst [1])
+(Equal (FlagLT)) => (MOVDconst [0])
+(Equal (FlagGT)) => (MOVDconst [0])
-(NotEqual (FlagEQ)) -> (MOVDconst [0])
-(NotEqual (FlagLT)) -> (MOVDconst [1])
-(NotEqual (FlagGT)) -> (MOVDconst [1])
+(NotEqual (FlagEQ)) => (MOVDconst [0])
+(NotEqual (FlagLT)) => (MOVDconst [1])
+(NotEqual (FlagGT)) => (MOVDconst [1])
-(LessThan (FlagEQ)) -> (MOVDconst [0])
-(LessThan (FlagLT)) -> (MOVDconst [1])
-(LessThan (FlagGT)) -> (MOVDconst [0])
+(LessThan (FlagEQ)) => (MOVDconst [0])
+(LessThan (FlagLT)) => (MOVDconst [1])
+(LessThan (FlagGT)) => (MOVDconst [0])
-(LessEqual (FlagEQ)) -> (MOVDconst [1])
-(LessEqual (FlagLT)) -> (MOVDconst [1])
-(LessEqual (FlagGT)) -> (MOVDconst [0])
+(LessEqual (FlagEQ)) => (MOVDconst [1])
+(LessEqual (FlagLT)) => (MOVDconst [1])
+(LessEqual (FlagGT)) => (MOVDconst [0])
-(GreaterThan (FlagEQ)) -> (MOVDconst [0])
-(GreaterThan (FlagLT)) -> (MOVDconst [0])
-(GreaterThan (FlagGT)) -> (MOVDconst [1])
+(GreaterThan (FlagEQ)) => (MOVDconst [0])
+(GreaterThan (FlagLT)) => (MOVDconst [0])
+(GreaterThan (FlagGT)) => (MOVDconst [1])
-(GreaterEqual (FlagEQ)) -> (MOVDconst [1])
-(GreaterEqual (FlagLT)) -> (MOVDconst [0])
-(GreaterEqual (FlagGT)) -> (MOVDconst [1])
+(GreaterEqual (FlagEQ)) => (MOVDconst [1])
+(GreaterEqual (FlagLT)) => (MOVDconst [0])
+(GreaterEqual (FlagGT)) => (MOVDconst [1])
// absorb InvertFlags into boolean values
-(Equal (InvertFlags x)) -> (Equal x)
-(NotEqual (InvertFlags x)) -> (NotEqual x)
-(LessThan (InvertFlags x)) -> (GreaterThan x)
-(GreaterThan (InvertFlags x)) -> (LessThan x)
-(LessEqual (InvertFlags x)) -> (GreaterEqual x)
-(GreaterEqual (InvertFlags x)) -> (LessEqual x)
+(Equal (InvertFlags x)) => (Equal x)
+(NotEqual (InvertFlags x)) => (NotEqual x)
+(LessThan (InvertFlags x)) => (GreaterThan x)
+(GreaterThan (InvertFlags x)) => (LessThan x)
+(LessEqual (InvertFlags x)) => (GreaterEqual x)
+(GreaterEqual (InvertFlags x)) => (LessEqual x)
// Elide compares of bit tests.
// TODO: need to make both CC and result of ANDCC available.
-((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (ANDconst [c] x)) yes no) -> ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
-((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (ANDconst [c] x)) yes no) -> ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
-((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 -> ((EQ|NE|LT|LE|GT|GE) (ANDCC x y) yes no)
-((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 -> ((EQ|NE|LT|LE|GT|GE) (ORCC x y) yes no)
-((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 -> ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (ANDconst [c] x)) yes no) => ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (ANDconst [c] x)) yes no) => ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ANDCC x y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ORCC x y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)
-(CondSelect x y bool) && flagArg(bool) != nil -> (ISEL [2] x y bool)
-(CondSelect x y bool) && flagArg(bool) == nil -> (ISEL [2] x y (CMPWconst [0] bool))
+(CondSelect x y bool) && flagArg(bool) != nil => (ISEL [2] x y bool)
+(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [2] x y (CMPWconst [0] bool))
// Lowering loads
-(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
-(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
-(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
-(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
-(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
-(Load <t> ptr mem) && t.IsBoolean() -> (MOVBZload ptr mem)
-(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) -> (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
-(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) -> (MOVBZload ptr mem)
-
-(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
-(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
-
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) => (MOVWZload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) => (MOVHload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) => (MOVHZload ptr mem)
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBZload ptr mem)
+(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) => (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
+(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) => (MOVBZload ptr mem)
+
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is32BitFloat(val.Type) => (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) => x -- type is wrong
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitInt(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
// Using Zero instead of LoweredZero allows the
// target address to be folded where possible.
-(Zero [0] _ mem) -> mem
-(Zero [1] destptr mem) -> (MOVBstorezero destptr mem)
-(Zero [2] destptr mem) ->
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstorezero destptr mem)
+(Zero [2] destptr mem) =>
(MOVHstorezero destptr mem)
-(Zero [3] destptr mem) ->
+(Zero [3] destptr mem) =>
(MOVBstorezero [2] destptr
(MOVHstorezero destptr mem))
-(Zero [4] destptr mem) ->
+(Zero [4] destptr mem) =>
(MOVWstorezero destptr mem)
-(Zero [5] destptr mem) ->
+(Zero [5] destptr mem) =>
(MOVBstorezero [4] destptr
(MOVWstorezero destptr mem))
-(Zero [6] destptr mem) ->
+(Zero [6] destptr mem) =>
(MOVHstorezero [4] destptr
(MOVWstorezero destptr mem))
-(Zero [7] destptr mem) ->
+(Zero [7] destptr mem) =>
(MOVBstorezero [6] destptr
(MOVHstorezero [4] destptr
(MOVWstorezero destptr mem)))
// MOVD for store with DS must have offsets that are a multiple of 4
-(Zero [8] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [8] {t} destptr mem) && t.Alignment()%4 == 0 =>
(MOVDstorezero destptr mem)
-(Zero [8] destptr mem) ->
+(Zero [8] destptr mem) =>
(MOVWstorezero [4] destptr
(MOVWstorezero [0] destptr mem))
// Handle these cases only if aligned properly; otherwise use the general case below
-(Zero [12] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [12] {t} destptr mem) && t.Alignment()%4 == 0 =>
(MOVWstorezero [8] destptr
(MOVDstorezero [0] destptr mem))
-(Zero [16] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [16] {t} destptr mem) && t.Alignment()%4 == 0 =>
(MOVDstorezero [8] destptr
(MOVDstorezero [0] destptr mem))
-(Zero [24] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [24] {t} destptr mem) && t.Alignment()%4 == 0 =>
(MOVDstorezero [16] destptr
(MOVDstorezero [8] destptr
(MOVDstorezero [0] destptr mem)))
-(Zero [32] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [32] {t} destptr mem) && t.Alignment()%4 == 0 =>
(MOVDstorezero [24] destptr
(MOVDstorezero [16] destptr
(MOVDstorezero [8] destptr
(MOVDstorezero [0] destptr mem))))
// Handle cases not handled above
// Lowered Short cases do not generate loops, and as a result don't clobber
// the address registers or flags.
-(Zero [s] ptr mem) && objabi.GOPPC64 <= 8 && s < 64 -> (LoweredZeroShort [s] ptr mem)
-(Zero [s] ptr mem) && objabi.GOPPC64 <= 8 -> (LoweredZero [s] ptr mem)
-(Zero [s] ptr mem) && s < 128 && objabi.GOPPC64 >= 9 -> (LoweredQuadZeroShort [s] ptr mem)
-(Zero [s] ptr mem) && objabi.GOPPC64 >= 9 -> (LoweredQuadZero [s] ptr mem)
+(Zero [s] ptr mem) && objabi.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem)
+(Zero [s] ptr mem) && objabi.GOPPC64 <= 8 => (LoweredZero [s] ptr mem)
+(Zero [s] ptr mem) && s < 128 && objabi.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem)
+(Zero [s] ptr mem) && objabi.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem)
// moves
// Only the MOVD and MOVW instructions require 4-byte
// alignment in the offset field. The other MOVx instructions
// allow any alignment.
-(Move [0] _ _ mem) -> mem
-(Move [1] dst src mem) -> (MOVBstore dst (MOVBZload src mem) mem)
-(Move [2] dst src mem) ->
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
+(Move [2] dst src mem) =>
(MOVHstore dst (MOVHZload src mem) mem)
-(Move [4] dst src mem) ->
+(Move [4] dst src mem) =>
(MOVWstore dst (MOVWZload src mem) mem)
// MOVD for load and store must have offsets that are a multiple of 4
-(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
(MOVDstore dst (MOVDload src mem) mem)
-(Move [8] dst src mem) ->
+(Move [8] dst src mem) =>
(MOVWstore [4] dst (MOVWZload [4] src mem)
(MOVWstore dst (MOVWZload src mem) mem))
-(Move [3] dst src mem) ->
+(Move [3] dst src mem) =>
(MOVBstore [2] dst (MOVBZload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem))
-(Move [5] dst src mem) ->
+(Move [5] dst src mem) =>
(MOVBstore [4] dst (MOVBZload [4] src mem)
(MOVWstore dst (MOVWZload src mem) mem))
-(Move [6] dst src mem) ->
+(Move [6] dst src mem) =>
(MOVHstore [4] dst (MOVHZload [4] src mem)
(MOVWstore dst (MOVWZload src mem) mem))
-(Move [7] dst src mem) ->
+(Move [7] dst src mem) =>
(MOVBstore [6] dst (MOVBZload [6] src mem)
(MOVHstore [4] dst (MOVHZload [4] src mem)
(MOVWstore dst (MOVWZload src mem) mem)))
// Large move uses a loop. Since the address is computed and the
// offset is zero, any alignment can be used.
-(Move [s] dst src mem) && s > 8 && objabi.GOPPC64 <= 8 && logLargeCopy(v, s) ->
+(Move [s] dst src mem) && s > 8 && objabi.GOPPC64 <= 8 && logLargeCopy(v, s) =>
(LoweredMove [s] dst src mem)
-(Move [s] dst src mem) && s > 8 && s <= 64 && objabi.GOPPC64 >= 9 ->
+(Move [s] dst src mem) && s > 8 && s <= 64 && objabi.GOPPC64 >= 9 =>
(LoweredQuadMoveShort [s] dst src mem)
-(Move [s] dst src mem) && s > 8 && objabi.GOPPC64 >= 9 && logLargeCopy(v, s) ->
+(Move [s] dst src mem) && s > 8 && objabi.GOPPC64 >= 9 && logLargeCopy(v, s) =>
(LoweredQuadMove [s] dst src mem)
// Calls
// Lowering calls
-(StaticCall ...) -> (CALLstatic ...)
-(ClosureCall ...) -> (CALLclosure ...)
-(InterCall ...) -> (CALLinter ...)
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
// Miscellaneous
-(GetClosurePtr ...) -> (LoweredGetClosurePtr ...)
-(GetCallerSP ...) -> (LoweredGetCallerSP ...)
-(GetCallerPC ...) -> (LoweredGetCallerPC ...)
-(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
-(IsInBounds idx len) -> (LessThan (CMPU idx len))
-(IsSliceInBounds idx len) -> (LessEqual (CMPU idx len))
-(NilCheck ...) -> (LoweredNilCheck ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) => (LessThan (CMPU idx len))
+(IsSliceInBounds idx len) => (LessEqual (CMPU idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
// Write barrier.
-(WB ...) -> (LoweredWB ...)
+(WB ...) => (LoweredWB ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 -> (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
// Optimizations
// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
// so ORconst, XORconst easily expand into a pair.
// Include very-large constants in the const-const case.
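// For example (a sketch), OR with the 32-bit constant 0x12345678 can be
// emitted as the pair
//	ORIS $0x1234, Rx	// ORs into bits 16:31
//	ORI  $0x5678, Rx	// ORs into bits 0:15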
-(AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
-(OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
-(XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])
-(ORN (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|^d])
-(ANDN (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&^d])
-(NOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [^(c|d)])
+(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
+(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
+(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
+(ORN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|^d])
+(ANDN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&^d])
+(NOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [^(c|d)])
// Discover consts
-(AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)
-(XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x)
-(OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x)
+(AND x (MOVDconst [c])) && isU16Bit(c) => (ANDconst [c] x)
+(XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
+(OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)
// Simplify consts
-(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
-(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
-(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
-(ANDconst [-1] x) -> x
-(ANDconst [0] _) -> (MOVDconst [0])
-(XORconst [0] x) -> x
-(ORconst [-1] _) -> (MOVDconst [-1])
-(ORconst [0] x) -> x
-
-// zero-extend of small and -> small and
-(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF -> y
-(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y
-(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF -> y
-(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF -> y
-
-// sign extend of small-positive and -> small-positive-and
-(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F -> y
-(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF -> y
-(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y // 0xFFFF is largest immediate constant, when regarded as 32-bit is > 0
-(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF -> y
-
-// small and of zero-extend -> either zero-extend or small and
-(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF -> y
-(ANDconst [0xFF] y:(MOVBreg _)) -> y
-(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF -> y
-(ANDconst [0xFFFF] y:(MOVHreg _)) -> y
-
-(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF -> y
-(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) -> (MOVWZreg x)
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(ANDconst [-1] x) => x
+(ANDconst [0] _) => (MOVDconst [0])
+(XORconst [0] x) => x
+(ORconst [-1] _) => (MOVDconst [-1])
+(ORconst [0] x) => x
+
+// zero-extend of small and => small and
+(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF => y
+(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y
+(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF => y
+(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y
+
+// sign extend of small-positive and => small-positive-and
+(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F => y
+(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF => y
+(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y // 0xFFFF is the largest immediate constant; regarded as 32-bit it is > 0
+(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y
+
+// small and of zero-extend => either zero-extend or small and
+(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF => y
+(ANDconst [0xFF] y:(MOVBreg _)) => y
+(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF => y
+(ANDconst [0xFFFF] y:(MOVHreg _)) => y
+
+(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y
+(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
// normal case
-(ANDconst [c] (MOV(B|BZ)reg x)) -> (ANDconst [c&0xFF] x)
-(ANDconst [c] (MOV(H|HZ)reg x)) -> (ANDconst [c&0xFFFF] x)
-(ANDconst [c] (MOV(W|WZ)reg x)) -> (ANDconst [c&0xFFFFFFFF] x)
+(ANDconst [c] (MOV(B|BZ)reg x)) => (ANDconst [c&0xFF] x)
+(ANDconst [c] (MOV(H|HZ)reg x)) => (ANDconst [c&0xFFFF] x)
+(ANDconst [c] (MOV(W|WZ)reg x)) => (ANDconst [c&0xFFFFFFFF] x)
// Eliminate unnecessary sign/zero extend following right shift
-(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) -> (SRWconst [c] (MOVBZreg x))
-(MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) -> (SRWconst [c] (MOVHZreg x))
-(MOVWZreg (SRWconst [c] (MOVWZreg x))) -> (SRWconst [c] (MOVWZreg x))
-(MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) -> (SRAWconst [c] (MOVBreg x))
-(MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) -> (SRAWconst [c] (MOVHreg x))
-(MOVWreg (SRAWconst [c] (MOVWreg x))) -> (SRAWconst [c] (MOVWreg x))
-
-(MOVWZreg (SRWconst [c] x)) && sizeof(x.Type) <= 32 -> (SRWconst [c] x)
-(MOVHZreg (SRWconst [c] x)) && sizeof(x.Type) <= 16 -> (SRWconst [c] x)
-(MOVBZreg (SRWconst [c] x)) && sizeof(x.Type) == 8 -> (SRWconst [c] x)
-(MOVWreg (SRAWconst [c] x)) && sizeof(x.Type) <= 32 -> (SRAWconst [c] x)
-(MOVHreg (SRAWconst [c] x)) && sizeof(x.Type) <= 16 -> (SRAWconst [c] x)
-(MOVBreg (SRAWconst [c] x)) && sizeof(x.Type) == 8 -> (SRAWconst [c] x)
+(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))
+(MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) => (SRWconst [c] (MOVHZreg x))
+(MOVWZreg (SRWconst [c] (MOVWZreg x))) => (SRWconst [c] (MOVWZreg x))
+(MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) => (SRAWconst [c] (MOVBreg x))
+(MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) => (SRAWconst [c] (MOVHreg x))
+(MOVWreg (SRAWconst [c] (MOVWreg x))) => (SRAWconst [c] (MOVWreg x))
+
+(MOVWZreg (SRWconst [c] x)) && sizeof(x.Type) <= 32 => (SRWconst [c] x)
+(MOVHZreg (SRWconst [c] x)) && sizeof(x.Type) <= 16 => (SRWconst [c] x)
+(MOVBZreg (SRWconst [c] x)) && sizeof(x.Type) == 8 => (SRWconst [c] x)
+(MOVWreg (SRAWconst [c] x)) && sizeof(x.Type) <= 32 => (SRAWconst [c] x)
+(MOVHreg (SRAWconst [c] x)) && sizeof(x.Type) <= 16 => (SRAWconst [c] x)
+(MOVBreg (SRAWconst [c] x)) && sizeof(x.Type) == 8 => (SRAWconst [c] x)
// initial right shift will handle sign/zero extend
-(MOVBZreg (SRDconst [c] x)) && c>=56 -> (SRDconst [c] x)
-(MOVBreg (SRDconst [c] x)) && c>56 -> (SRDconst [c] x)
-(MOVBreg (SRDconst [c] x)) && c==56 -> (SRADconst [c] x)
-(MOVBreg (SRADconst [c] x)) && c>=56 -> (SRADconst [c] x)
-(MOVBZreg (SRWconst [c] x)) && c>=24 -> (SRWconst [c] x)
-(MOVBreg (SRWconst [c] x)) && c>24 -> (SRWconst [c] x)
-(MOVBreg (SRWconst [c] x)) && c==24 -> (SRAWconst [c] x)
-(MOVBreg (SRAWconst [c] x)) && c>=24 -> (SRAWconst [c] x)
-
-(MOVHZreg (SRDconst [c] x)) && c>=48 -> (SRDconst [c] x)
-(MOVHreg (SRDconst [c] x)) && c>48 -> (SRDconst [c] x)
-(MOVHreg (SRDconst [c] x)) && c==48 -> (SRADconst [c] x)
-(MOVHreg (SRADconst [c] x)) && c>=48 -> (SRADconst [c] x)
-(MOVHZreg (SRWconst [c] x)) && c>=16 -> (SRWconst [c] x)
-(MOVHreg (SRWconst [c] x)) && c>16 -> (SRWconst [c] x)
-(MOVHreg (SRAWconst [c] x)) && c>=16 -> (SRAWconst [c] x)
-(MOVHreg (SRWconst [c] x)) && c==16 -> (SRAWconst [c] x)
-
-(MOVWZreg (SRDconst [c] x)) && c>=32 -> (SRDconst [c] x)
-(MOVWreg (SRDconst [c] x)) && c>32 -> (SRDconst [c] x)
-(MOVWreg (SRADconst [c] x)) && c>=32 -> (SRADconst [c] x)
-(MOVWreg (SRDconst [c] x)) && c==32 -> (SRADconst [c] x)
+(MOVBZreg (SRDconst [c] x)) && c>=56 => (SRDconst [c] x)
+(MOVBreg (SRDconst [c] x)) && c>56 => (SRDconst [c] x)
+(MOVBreg (SRDconst [c] x)) && c==56 => (SRADconst [c] x)
+(MOVBreg (SRADconst [c] x)) && c>=56 => (SRADconst [c] x)
+(MOVBZreg (SRWconst [c] x)) && c>=24 => (SRWconst [c] x)
+(MOVBreg (SRWconst [c] x)) && c>24 => (SRWconst [c] x)
+(MOVBreg (SRWconst [c] x)) && c==24 => (SRAWconst [c] x)
+(MOVBreg (SRAWconst [c] x)) && c>=24 => (SRAWconst [c] x)
+
+(MOVHZreg (SRDconst [c] x)) && c>=48 => (SRDconst [c] x)
+(MOVHreg (SRDconst [c] x)) && c>48 => (SRDconst [c] x)
+(MOVHreg (SRDconst [c] x)) && c==48 => (SRADconst [c] x)
+(MOVHreg (SRADconst [c] x)) && c>=48 => (SRADconst [c] x)
+(MOVHZreg (SRWconst [c] x)) && c>=16 => (SRWconst [c] x)
+(MOVHreg (SRWconst [c] x)) && c>16 => (SRWconst [c] x)
+(MOVHreg (SRAWconst [c] x)) && c>=16 => (SRAWconst [c] x)
+(MOVHreg (SRWconst [c] x)) && c==16 => (SRAWconst [c] x)
+
+(MOVWZreg (SRDconst [c] x)) && c>=32 => (SRDconst [c] x)
+(MOVWreg (SRDconst [c] x)) && c>32 => (SRDconst [c] x)
+(MOVWreg (SRADconst [c] x)) && c>=32 => (SRADconst [c] x)
+(MOVWreg (SRDconst [c] x)) && c==32 => (SRADconst [c] x)
// Various redundant zero/sign extension combinations.
-(MOVBZreg y:(MOVBZreg _)) -> y // repeat
-(MOVBreg y:(MOVBreg _)) -> y // repeat
-(MOVBreg (MOVBZreg x)) -> (MOVBreg x)
-(MOVBZreg (MOVBreg x)) -> (MOVBZreg x)
+(MOVBZreg y:(MOVBZreg _)) => y // repeat
+(MOVBreg y:(MOVBreg _)) => y // repeat
+(MOVBreg (MOVBZreg x)) => (MOVBreg x)
+(MOVBZreg (MOVBreg x)) => (MOVBZreg x)
// H - there are more combinations than these
-(MOVHZreg y:(MOVHZreg _)) -> y // repeat
-(MOVHZreg y:(MOVBZreg _)) -> y // wide of narrow
-(MOVHZreg y:(MOVHBRload _ _)) -> y
+(MOVHZreg y:(MOVHZreg _)) => y // repeat
+(MOVHZreg y:(MOVBZreg _)) => y // wide of narrow
+(MOVHZreg y:(MOVHBRload _ _)) => y
-(MOVHreg y:(MOVHreg _)) -> y // repeat
-(MOVHreg y:(MOVBreg _)) -> y // wide of narrow
+(MOVHreg y:(MOVHreg _)) => y // repeat
+(MOVHreg y:(MOVBreg _)) => y // wide of narrow
-(MOVHreg y:(MOVHZreg x)) -> (MOVHreg x)
-(MOVHZreg y:(MOVHreg x)) -> (MOVHZreg x)
+(MOVHreg y:(MOVHZreg x)) => (MOVHreg x)
+(MOVHZreg y:(MOVHreg x)) => (MOVHZreg x)
// W - there are more combinations than these
-(MOVWZreg y:(MOVWZreg _)) -> y // repeat
-(MOVWZreg y:(MOVHZreg _)) -> y // wide of narrow
-(MOVWZreg y:(MOVBZreg _)) -> y // wide of narrow
-(MOVWZreg y:(MOVHBRload _ _)) -> y
-(MOVWZreg y:(MOVWBRload _ _)) -> y
+(MOVWZreg y:(MOVWZreg _)) => y // repeat
+(MOVWZreg y:(MOVHZreg _)) => y // wide of narrow
+(MOVWZreg y:(MOVBZreg _)) => y // wide of narrow
+(MOVWZreg y:(MOVHBRload _ _)) => y
+(MOVWZreg y:(MOVWBRload _ _)) => y
-(MOVWreg y:(MOVWreg _)) -> y // repeat
-(MOVWreg y:(MOVHreg _)) -> y // wide of narrow
-(MOVWreg y:(MOVBreg _)) -> y // wide of narrow
+(MOVWreg y:(MOVWreg _)) => y // repeat
+(MOVWreg y:(MOVHreg _)) => y // wide of narrow
+(MOVWreg y:(MOVBreg _)) => y // wide of narrow
-(MOVWreg y:(MOVWZreg x)) -> (MOVWreg x)
-(MOVWZreg y:(MOVWreg x)) -> (MOVWZreg x)
+(MOVWreg y:(MOVWZreg x)) => (MOVWreg x)
+(MOVWZreg y:(MOVWreg x)) => (MOVWZreg x)
// Arithmetic constant ops
-(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
-(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
-(ADDconst [0] x) -> x
-(SUB x (MOVDconst [c])) && is32Bit(-c) -> (ADDconst [-c] x)
+(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [c] x)
+(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x)
+(ADDconst [0] x) => x
+(SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x)
// TODO deal with subtract-from-const
-(ADDconst [c] (MOVDaddr [d] {sym} x)) -> (MOVDaddr [c+d] {sym} x)
+(ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x)
-// Use register moves instead of stores and loads to move int<->float values
+// Use register moves instead of stores and loads to move int<=>float values
// Common with math Float64bits, Float64frombits
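// For example, math.Float64bits(f) compiles to an FMOVDstore of f followed
// by a MOVDload of the same stack slot; the rules below replace that memory
// round trip with a direct MFVSRD register move.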
-(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) -> (MFVSRD x)
-(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) -> (MTVSRD x)
+(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) => (MFVSRD x)
+(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) => (MTVSRD x)
-(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) -> (MOVDstore [off] {sym} ptr x mem)
-(MOVDstore [off] {sym} ptr (MFVSRD x) mem) -> (FMOVDstore [off] {sym} ptr x mem)
+(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) => (MOVDstore [off] {sym} ptr x mem)
+(MOVDstore [off] {sym} ptr (MFVSRD x) mem) => (FMOVDstore [off] {sym} ptr x mem)
-(MTVSRD (MOVDconst [c])) -> (FMOVDconst [c])
-(MFVSRD (FMOVDconst [c])) -> (MOVDconst [c])
+(MTVSRD (MOVDconst [c])) => (FMOVDconst [math.Float64frombits(uint64(c))])
+(MFVSRD (FMOVDconst [c])) => (MOVDconst [int64(math.Float64bits(c))])
-(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (FMOVDload [off] {sym} ptr mem)
-(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVDload [off] {sym} ptr mem)
+(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem)
+(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem)
// Fold offsets for stores.
-(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) && (off1+off2)%4 == 0 -> (MOVDstore [off1+off2] {sym} x val mem)
-(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} x val mem)
-(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} x val mem)
-(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} x val mem)
+(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 => (MOVDstore [off1+int32(off2)] {sym} x val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} x val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} x val mem)
+(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} x val mem)
-(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
-(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem)
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(int64(off1)+off2) => (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(int64(off1)+off2) => (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
// Fold address into load/store.
// The assembler needs to generate several instructions and use
// the temp register. So don't fold the address of a global unless there
// is only one use.
(MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
- (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
- (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
- (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 ->
- (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
+ (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
- (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ && (ptr.Op != OpSB || p.Uses == 1) =>
+ (FMOVSstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
- (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ && (ptr.Op != OpSB || p.Uses == 1) =>
+ (FMOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
- (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVBZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
- (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
- (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVHZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 ->
- (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
+ (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
- (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVWZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 ->
- (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
+ (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
- (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ && (ptr.Op != OpSB || p.Uses == 1) =>
+ (FMOVSload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) ->
- (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ && (ptr.Op != OpSB || p.Uses == 1) =>
+ (FMOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// Fold offsets for loads.
-(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
-(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOVDload [off1+int32(off2)] {sym} ptr mem)
-(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) && (off1+off2)%4 == 0 -> (MOVDload [off1+off2] {sym} x mem)
-(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) && (off1+off2)%4 == 0 -> (MOVWload [off1+off2] {sym} x mem)
-(MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} x mem)
-(MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHload [off1+off2] {sym} x mem)
-(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} x mem)
-(MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} x mem)
+(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 => (MOVDload [off1+int32(off2)] {sym} x mem)
+(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 => (MOVWload [off1+int32(off2)] {sym} x mem)
+(MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVWZload [off1+int32(off2)] {sym} x mem)
+(MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} x mem)
+(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVHZload [off1+int32(off2)] {sym} x mem)
+(MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVBZload [off1+int32(off2)] {sym} x mem)
// Determine load + addressing that can be done as a register indexed load
-(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 -> (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
+(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
// Determine indexed loads with constant values that can be done without index
-(MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) && c%4 == 0 -> (MOV(D|W)load [c] ptr mem)
-(MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) -> (MOV(WZ|H|HZ|BZ)load [c] ptr mem)
-(MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) && c%4 == 0 -> (MOV(D|W)load [c] ptr mem)
-(MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) -> (MOV(WZ|H|HZ|BZ)load [c] ptr mem)
+(MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
+(MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
+(MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
+(MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
-// Store of zero -> storezero
-(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
-(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
-(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
-(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
+// Store of zero => storezero
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
// Fold offsets for storezero
-(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) && (off1+off2)%4 == 0 ->
- (MOVDstorezero [off1+off2] {sym} x mem)
-(MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
- (MOVWstorezero [off1+off2] {sym} x mem)
-(MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
- (MOVHstorezero [off1+off2] {sym} x mem)
-(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
- (MOVBstorezero [off1+off2] {sym} x mem)
+(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 =>
+ (MOVDstorezero [off1+int32(off2)] {sym} x mem)
+(MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
+ (MOVWstorezero [off1+int32(off2)] {sym} x mem)
+(MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
+ (MOVHstorezero [off1+int32(off2)] {sym} x mem)
+(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
+ (MOVBstorezero [off1+int32(off2)] {sym} x mem)
// Stores with addressing that can be done as indexed stores
-(MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 -> (MOV(D|W|H|B)storeidx ptr idx val mem)
+(MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem)
// Stores with constant index values can be done without indexed instructions
-(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is16Bit(c) && c%4 == 0 -> (MOVDstore [c] ptr val mem)
-(MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) -> (MOV(W|H|B)store [c] ptr val mem)
-(MOVDstoreidx (MOVDconst [c]) ptr val mem) && is16Bit(c) && c%4 == 0 -> (MOVDstore [c] ptr val mem)
-(MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) -> (MOV(W|H|B)store [c] ptr val mem)
+(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
+(MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
+(MOVDstoreidx (MOVDconst [c]) ptr val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
+(MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
// Fold symbols into storezero
(MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
	&& (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 ->
	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
	&& (x.Op != OpSB || p.Uses == 1) ->
	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
// Atomic intrinsics
-(AtomicLoad(8|32|64|Ptr) ptr mem) -> (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
-(AtomicLoadAcq32 ptr mem) -> (LoweredAtomicLoad32 [0] ptr mem)
+(AtomicLoad(8|32|64|Ptr) ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
+(AtomicLoadAcq32 ptr mem) => (LoweredAtomicLoad32 [0] ptr mem)
-(AtomicStore(8|32|64) ptr val mem) -> (LoweredAtomicStore(8|32|64) [1] ptr val mem)
-(AtomicStoreRel32 ptr val mem) -> (LoweredAtomicStore32 [0] ptr val mem)
-//(AtomicStorePtrNoWB ptr val mem) -> (STLR ptr val mem)
+(AtomicStore(8|32|64) ptr val mem) => (LoweredAtomicStore(8|32|64) [1] ptr val mem)
+(AtomicStoreRel32 ptr val mem) => (LoweredAtomicStore32 [0] ptr val mem)
+//(AtomicStorePtrNoWB ptr val mem) => (STLR ptr val mem)
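// The auxInt on the lowered atomics encodes the required ordering: [1]
// requests the full-barrier form, [0] the weaker acquire (load) or release
// (store) form used by the Acq/Rel ops above.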
-(AtomicExchange(32|64) ...) -> (LoweredAtomicExchange(32|64) ...)
+(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
-(AtomicAdd(32|64) ...) -> (LoweredAtomicAdd(32|64) ...)
+(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
-(AtomicCompareAndSwap(32|64) ptr old new_ mem) -> (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
-(AtomicCompareAndSwapRel32 ptr old new_ mem) -> (LoweredAtomicCas32 [0] ptr old new_ mem)
+(AtomicCompareAndSwap(32|64) ptr old new_ mem) => (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
+(AtomicCompareAndSwapRel32 ptr old new_ mem) => (LoweredAtomicCas32 [0] ptr old new_ mem)
-(AtomicAnd8 ...) -> (LoweredAtomicAnd8 ...)
-(AtomicOr8 ...) -> (LoweredAtomicOr8 ...)
+(AtomicAnd8 ...) => (LoweredAtomicAnd8 ...)
+(AtomicOr8 ...) => (LoweredAtomicOr8 ...)
// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
-(SignExt8to(16|32|64) ...) -> (MOVBreg ...)
-(SignExt16to(32|64) ...) -> (MOVHreg ...)
-(SignExt32to64 ...) -> (MOVWreg ...)
+(SignExt8to(16|32|64) ...) => (MOVBreg ...)
+(SignExt16to(32|64) ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
-(ZeroExt8to(16|32|64) ...) -> (MOVBZreg ...)
-(ZeroExt16to(32|64) ...) -> (MOVHZreg ...)
-(ZeroExt32to64 ...) -> (MOVWZreg ...)
+(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
+(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
+(ZeroExt32to64 ...) => (MOVWZreg ...)
-(Trunc(16|32|64)to8 <t> x) && isSigned(t) -> (MOVBreg x)
-(Trunc(16|32|64)to8 x) -> (MOVBZreg x)
-(Trunc(32|64)to16 <t> x) && isSigned(t) -> (MOVHreg x)
-(Trunc(32|64)to16 x) -> (MOVHZreg x)
-(Trunc64to32 <t> x) && isSigned(t) -> (MOVWreg x)
-(Trunc64to32 x) -> (MOVWZreg x)
+(Trunc(16|32|64)to8 <t> x) && isSigned(t) => (MOVBreg x)
+(Trunc(16|32|64)to8 x) => (MOVBZreg x)
+(Trunc(32|64)to16 <t> x) && isSigned(t) => (MOVHreg x)
+(Trunc(32|64)to16 x) => (MOVHZreg x)
+(Trunc64to32 <t> x) && isSigned(t) => (MOVWreg x)
+(Trunc64to32 x) => (MOVWZreg x)
-(Slicemask <t> x) -> (SRADconst (NEG <t> x) [63])
+(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
// Note that MOV??reg returns a 64-bit int; x is not necessarily that wide.
// This may interact with other patterns in the future. (Compare with arm64.)
-(MOV(B|H|W)Zreg x:(MOVBZload _ _)) -> x
-(MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) -> x
-(MOV(H|W)Zreg x:(MOVHZload _ _)) -> x
-(MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) -> x
-(MOV(H|W)reg x:(MOVHload _ _)) -> x
-(MOV(H|W)reg x:(MOVHloadidx _ _ _)) -> x
-(MOVWZreg x:(MOVWZload _ _)) -> x
-(MOVWZreg x:(MOVWZloadidx _ _ _)) -> x
-(MOVWreg x:(MOVWload _ _)) -> x
-(MOVWreg x:(MOVWloadidx _ _ _)) -> x
+(MOV(B|H|W)Zreg x:(MOVBZload _ _)) => x
+(MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) => x
+(MOV(H|W)Zreg x:(MOVHZload _ _)) => x
+(MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) => x
+(MOV(H|W)reg x:(MOVHload _ _)) => x
+(MOV(H|W)reg x:(MOVHloadidx _ _ _)) => x
+(MOVWZreg x:(MOVWZload _ _)) => x
+(MOVWZreg x:(MOVWZloadidx _ _ _)) => x
+(MOVWreg x:(MOVWload _ _)) => x
+(MOVWreg x:(MOVWloadidx _ _ _)) => x
// don't extend if argument is already extended
-(MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) -> x
-(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) -> x
-(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) -> x
-(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) -> x
-(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) -> x
-(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) -> x
+(MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) => x
+(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) => x
+(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) => x
+(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) => x
+(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) => x
+(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) => x
-(MOVBZreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))])
-(MOVBreg (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
-(MOVHZreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))])
-(MOVHreg (MOVDconst [c])) -> (MOVDconst [int64(int16(c))])
-(MOVWreg (MOVDconst [c])) -> (MOVDconst [int64(int32(c))])
-(MOVWZreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])
+(MOVBZreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
+(MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
// Lose widening ops fed to stores
-(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
-(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 -> (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
-(MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 -> (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
-(MOVBstoreidx ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstoreidx ptr idx x mem)
-(MOVHstoreidx ptr idx (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstoreidx ptr idx x mem)
-(MOVWstoreidx ptr idx (MOV(W|WZ)reg x) mem) -> (MOVWstoreidx ptr idx x mem)
-(MOVBstoreidx ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 -> (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
-(MOVBstoreidx ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 -> (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
-(MOVHBRstore {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHBRstore {sym} ptr x mem)
-(MOVWBRstore {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWBRstore {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstoreidx ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVWstoreidx ptr idx (MOV(W|WZ)reg x) mem) => (MOVWstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstoreidx ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+(MOVHBRstore {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHBRstore {sym} ptr x mem)
+(MOVWBRstore {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWBRstore {sym} ptr x mem)
// Lose W-widening ops fed to compare-W
-(CMPW x (MOVWreg y)) -> (CMPW x y)
-(CMPW (MOVWreg x) y) -> (CMPW x y)
-(CMPWU x (MOVWZreg y)) -> (CMPWU x y)
-(CMPWU (MOVWZreg x) y) -> (CMPWU x y)
+(CMPW x (MOVWreg y)) => (CMPW x y)
+(CMPW (MOVWreg x) y) => (CMPW x y)
+(CMPWU x (MOVWZreg y)) => (CMPWU x y)
+(CMPWU (MOVWZreg x) y) => (CMPWU x y)
-(CMP x (MOVDconst [c])) && is16Bit(c) -> (CMPconst x [c])
-(CMP (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPconst y [c]))
-(CMPW x (MOVDconst [c])) && is16Bit(c) -> (CMPWconst x [c])
-(CMPW (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPWconst y [c]))
+(CMP x (MOVDconst [c])) && is16Bit(c) => (CMPconst x [c])
+(CMP (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPconst y [c]))
+(CMPW x (MOVDconst [c])) && is16Bit(c) => (CMPWconst x [int32(c)])
+(CMPW (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPWconst y [int32(c)]))
-(CMPU x (MOVDconst [c])) && isU16Bit(c) -> (CMPUconst x [c])
-(CMPU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPUconst y [c]))
-(CMPWU x (MOVDconst [c])) && isU16Bit(c) -> (CMPWUconst x [c])
-(CMPWU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPWUconst y [c]))
+(CMPU x (MOVDconst [c])) && isU16Bit(c) => (CMPUconst x [c])
+(CMPU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPUconst y [c]))
+(CMPWU x (MOVDconst [c])) && isU16Bit(c) => (CMPWUconst x [int32(c)])
+(CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
// Canonicalize the order of arguments to comparisons - helps with CSE.
-((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID -> (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
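// Normalizing on the lower-ID argument means two sites that compare the same
// pair of values in opposite orders share one CMP plus an InvertFlags instead
// of computing two comparisons.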
// ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
// ISEL auxInt values 4=GE 5=LE 6=NE arg2 ? arg1 : arg0
// ISELB special case where arg0, arg1 values are 0, 1
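// As a sketch of the semantics described above:
//	ISEL [n] x y flags  ==  cond(n) ? x : y      for n = 0,1,2 (LT,GT,EQ)
//	ISEL [n] x y flags  ==  cond(n-4) ? y : x    for n = 4,5,6 (GE,LE,NE)
// so ISELB [n] (MOVDconst [1]) flags materializes a boolean 0 or 1.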
-(Equal cmp) -> (ISELB [2] (MOVDconst [1]) cmp)
-(NotEqual cmp) -> (ISELB [6] (MOVDconst [1]) cmp)
-(LessThan cmp) -> (ISELB [0] (MOVDconst [1]) cmp)
-(FLessThan cmp) -> (ISELB [0] (MOVDconst [1]) cmp)
-(FLessEqual cmp) -> (ISEL [2] (MOVDconst [1]) (ISELB [0] (MOVDconst [1]) cmp) cmp)
-(GreaterEqual cmp) -> (ISELB [4] (MOVDconst [1]) cmp)
-(GreaterThan cmp) -> (ISELB [1] (MOVDconst [1]) cmp)
-(FGreaterThan cmp) -> (ISELB [1] (MOVDconst [1]) cmp)
-(FGreaterEqual cmp) -> (ISEL [2] (MOVDconst [1]) (ISELB [1] (MOVDconst [1]) cmp) cmp)
-(LessEqual cmp) -> (ISELB [5] (MOVDconst [1]) cmp)
-
-(ISELB [0] _ (FlagLT)) -> (MOVDconst [1])
-(ISELB [0] _ (Flag(GT|EQ))) -> (MOVDconst [0])
-(ISELB [1] _ (FlagGT)) -> (MOVDconst [1])
-(ISELB [1] _ (Flag(LT|EQ))) -> (MOVDconst [0])
-(ISELB [2] _ (FlagEQ)) -> (MOVDconst [1])
-(ISELB [2] _ (Flag(LT|GT))) -> (MOVDconst [0])
-(ISELB [4] _ (FlagLT)) -> (MOVDconst [0])
-(ISELB [4] _ (Flag(GT|EQ))) -> (MOVDconst [1])
-(ISELB [5] _ (FlagGT)) -> (MOVDconst [0])
-(ISELB [5] _ (Flag(LT|EQ))) -> (MOVDconst [1])
-(ISELB [6] _ (FlagEQ)) -> (MOVDconst [0])
-(ISELB [6] _ (Flag(LT|GT))) -> (MOVDconst [1])
-
-(ISEL [2] x _ (FlagEQ)) -> x
-(ISEL [2] _ y (Flag(LT|GT))) -> y
-
-(ISEL [6] _ y (FlagEQ)) -> y
-(ISEL [6] x _ (Flag(LT|GT))) -> x
-
-(ISEL [0] _ y (Flag(EQ|GT))) -> y
-(ISEL [0] x _ (FlagLT)) -> x
-
-(ISEL [5] _ x (Flag(EQ|LT))) -> x
-(ISEL [5] y _ (FlagGT)) -> y
-
-(ISEL [1] _ y (Flag(EQ|LT))) -> y
-(ISEL [1] x _ (FlagGT)) -> x
-
-(ISEL [4] x _ (Flag(EQ|GT))) -> x
-(ISEL [4] _ y (FlagLT)) -> y
-
-(ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 0 -> (ISELB [n+1] (MOVDconst [1]) bool)
-(ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 1 -> (ISELB [n-1] (MOVDconst [1]) bool)
-(ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 2 -> (ISELB [n] (MOVDconst [1]) bool)
-(ISEL [n] x y (InvertFlags bool)) && n%4 == 0 -> (ISEL [n+1] x y bool)
-(ISEL [n] x y (InvertFlags bool)) && n%4 == 1 -> (ISEL [n-1] x y bool)
-(ISEL [n] x y (InvertFlags bool)) && n%4 == 2 -> (ISEL [n] x y bool)
+(Equal cmp) => (ISELB [2] (MOVDconst [1]) cmp)
+(NotEqual cmp) => (ISELB [6] (MOVDconst [1]) cmp)
+(LessThan cmp) => (ISELB [0] (MOVDconst [1]) cmp)
+(FLessThan cmp) => (ISELB [0] (MOVDconst [1]) cmp)
+(FLessEqual cmp) => (ISEL [2] (MOVDconst [1]) (ISELB [0] (MOVDconst [1]) cmp) cmp)
+(GreaterEqual cmp) => (ISELB [4] (MOVDconst [1]) cmp)
+(GreaterThan cmp) => (ISELB [1] (MOVDconst [1]) cmp)
+(FGreaterThan cmp) => (ISELB [1] (MOVDconst [1]) cmp)
+(FGreaterEqual cmp) => (ISEL [2] (MOVDconst [1]) (ISELB [1] (MOVDconst [1]) cmp) cmp)
+(LessEqual cmp) => (ISELB [5] (MOVDconst [1]) cmp)
+
+(ISELB [0] _ (FlagLT)) => (MOVDconst [1])
+(ISELB [0] _ (Flag(GT|EQ))) => (MOVDconst [0])
+(ISELB [1] _ (FlagGT)) => (MOVDconst [1])
+(ISELB [1] _ (Flag(LT|EQ))) => (MOVDconst [0])
+(ISELB [2] _ (FlagEQ)) => (MOVDconst [1])
+(ISELB [2] _ (Flag(LT|GT))) => (MOVDconst [0])
+(ISELB [4] _ (FlagLT)) => (MOVDconst [0])
+(ISELB [4] _ (Flag(GT|EQ))) => (MOVDconst [1])
+(ISELB [5] _ (FlagGT)) => (MOVDconst [0])
+(ISELB [5] _ (Flag(LT|EQ))) => (MOVDconst [1])
+(ISELB [6] _ (FlagEQ)) => (MOVDconst [0])
+(ISELB [6] _ (Flag(LT|GT))) => (MOVDconst [1])
+
+(ISEL [2] x _ (FlagEQ)) => x
+(ISEL [2] _ y (Flag(LT|GT))) => y
+
+(ISEL [6] _ y (FlagEQ)) => y
+(ISEL [6] x _ (Flag(LT|GT))) => x
+
+(ISEL [0] _ y (Flag(EQ|GT))) => y
+(ISEL [0] x _ (FlagLT)) => x
+
+(ISEL [5] _ x (Flag(EQ|LT))) => x
+(ISEL [5] y _ (FlagGT)) => y
+
+(ISEL [1] _ y (Flag(EQ|LT))) => y
+(ISEL [1] x _ (FlagGT)) => x
+
+(ISEL [4] x _ (Flag(EQ|GT))) => x
+(ISEL [4] _ y (FlagLT)) => y
+
+(ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 0 => (ISELB [n+1] (MOVDconst [1]) bool)
+(ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 1 => (ISELB [n-1] (MOVDconst [1]) bool)
+(ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 2 => (ISELB [n] (MOVDconst [1]) bool)
+(ISEL [n] x y (InvertFlags bool)) && n%4 == 0 => (ISEL [n+1] x y bool)
+(ISEL [n] x y (InvertFlags bool)) && n%4 == 1 => (ISEL [n-1] x y bool)
+(ISEL [n] x y (InvertFlags bool)) && n%4 == 2 => (ISEL [n] x y bool)
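// InvertFlags swaps LT and GT while leaving EQ alone, which is exactly the
// n%4 arithmetic above: e.g. (ISEL [0] x y (InvertFlags cmp)), "x if LT",
// becomes (ISEL [1] x y cmp), "x if GT" on the uninverted comparison.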
// A particular pattern seen in cgo code:
-(AND (MOVDconst [c]) x:(MOVBZload _ _)) -> (ANDconst [c&0xFF] x)
+(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x)
// floating point negative abs
-(FNEG (FABS x)) -> (FNABS x)
-(FNEG (FNABS x)) -> (FABS x)
+(FNEG (FABS x)) => (FNABS x)
+(FNEG (FNABS x)) => (FABS x)
// floating-point fused multiply-add/sub
-(FADD (FMUL x y) z) -> (FMADD x y z)
-(FSUB (FMUL x y) z) -> (FMSUB x y z)
-(FADDS (FMULS x y) z) -> (FMADDS x y z)
-(FSUBS (FMULS x y) z) -> (FMSUBS x y z)
+(FADD (FMUL x y) z) => (FMADD x y z)
+(FSUB (FMUL x y) z) => (FMSUB x y z)
+(FADDS (FMULS x y) z) => (FMADDS x y z)
+(FSUBS (FMULS x y) z) => (FMSUBS x y z)
// The following statements are found in the encoding/binary functions UintXX (load) and PutUintXX (store).
// To implement these for big endian machines, most rules would have to be duplicated, but the
// resulting rule would be reversed, i.e., MOVHZload on little endian would be MOVHBRload on big endian
// and vice versa.
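// For example, the first rule below matches the shape generated for
//	binary.LittleEndian.Uint16(b)  ==  uint16(b[0]) | uint16(b[1])<<8
// (a sketch; the library code spells out the same expression).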
-// b[0] | b[1]<<8 -> load 16-bit Little endian
+// b[0] | b[1]<<8 => load 16-bit Little endian
(OR <t> x0:(MOVBZload [i0] {s} p mem)
o1:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [8]))
&& !config.BigEndian
&& o1.Uses == 1
&& mergePoint(b, x0, x1) != nil
&& clobber(x0, x1, o1)
- -> @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)
+ => @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)
-// b[0]<<8 | b[1] -> load 16-bit Big endian on Little endian arch.
+// b[0]<<8 | b[1] => load 16-bit Big endian on Little endian arch.
// Use byte-reverse indexed load for 2 bytes.
(OR <t> x0:(MOVBZload [i1] {s} p mem)
o1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [8]))
&& o1.Uses == 1
&& mergePoint(b, x0, x1) != nil
&& clobber(x0, x1, o1)
- -> @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ => @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
-// b[0]<<n+8 | b[1]<<n -> load 16-bit Big endian (where n%8== 0)
+// b[0]<<(n+8) | b[1]<<n => load 16-bit Big endian (where n%8 == 0)
// Use byte-reverse indexed load for 2 bytes,
// then shift left to the correct position. Used to match subrules
// from longer rules.
&& s0.Uses == 1 && s1.Uses == 1
&& mergePoint(b, x0, x1) != nil
&& clobber(x0, x1, s0, s1)
- -> @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])
+ => @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])
-// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit Little endian
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 => load 32-bit Little endian
// Use byte-reverse indexed load for 4 bytes.
(OR <t> s1:(SL(W|D)const x2:(MOVBZload [i3] {s} p mem) [24])
o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [16])
&& s0.Uses == 1 && s1.Uses == 1
&& mergePoint(b, x0, x1, x2) != nil
&& clobber(x0, x1, x2, s0, s1, o0)
- -> @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
+ => @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
-// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] -> load 32-bit Big endian order on Little endian arch
+// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] => load 32-bit Big endian order on Little endian arch
// Use byte-reverse indexed load for 4 bytes with computed address.
// Could be used to match subrules of a longer rule.
(OR <t> s1:(SL(W|D)const x2:(MOVBZload [i0] {s} p mem) [24])
&& s0.Uses == 1 && s1.Uses == 1
&& mergePoint(b, x0, x1, x2) != nil
&& clobber(x0, x1, x2, s0, s1, o0)
- -> @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ => @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
-// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit Big endian order on Little endian arch
+// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 => load 32-bit Big endian order on Little endian arch
// Use byte-reverse indexed load for 4 bytes with computed address.
// Could be used to match subrules of a longer rule.
(OR <t> x0:(MOVBZload [i3] {s} p mem)
&& s0.Uses == 1 && s1.Uses == 1
&& mergePoint(b, x0, x1, x2) != nil
&& clobber(x0, x1, x2, s0, s1, o0)
- -> @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ => @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
-// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 -> load 32-bit Big endian order on Little endian arch
+// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 => load 32-bit Big endian order on Little endian arch
// Use byte-reverse indexed load for 4 bytes with computed address.
// Used to match longer rules.
(OR <t> s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32])
&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
&& mergePoint(b, x0, x1, x2) != nil
&& clobber(x0, x1, x2, s0, s1, s2, o0)
- -> @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
+ => @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
-// b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 32-bit Big endian order on Little endian arch
+// b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 32-bit Big endian order on Little endian arch
// Use byte-reverse indexed load for 4 bytes with constant address.
// Used to match longer rules.
(OR <t> s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56])
&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
&& mergePoint(b, x0, x1, x2) != nil
&& clobber(x0, x1, x2, s0, s1, s2, o0)
- -> @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
+ => @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
-// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4] <<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit Little endian
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 => load 64-bit Little endian
// Rules with commutative ops and many operands will result in extremely large functions in rewritePPC64,
// so matching shorter previously defined subrules is important.
// Offset must be a multiple of 4 for MOVD
&& s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
&& mergePoint(b, x0, x4, x5, x6, x7) != nil
&& clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5)
- -> @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)
+ => @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)
// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 64-bit Big endian ordered bytes on Little endian arch
// Use byte-reverse indexed load of 8 bytes.
&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
&& mergePoint(b, x0, x1, x2, x3, x4) != nil
&& clobber(x0, x1, x2, x3, x4, o0, o1, o2, s0, s1, s2, s3)
- -> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
-// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] -> load 64-bit Big endian ordered bytes on Little endian arch
+// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] => load 64-bit Big endian ordered bytes on Little endian arch
// Use byte-reverse indexed load of 8 bytes.
// Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
// so matching shorter previously defined subrules is important.
&& s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
&& mergePoint(b, x3, x4, x5, x6, x7) != nil
&& clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)
- -> @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ => @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
// 2 byte store Little endian as in:
// b[0] = byte(v >> 16)
&& x0.Uses == 1
&& i1 == i0+1
&& clobber(x0)
- -> (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
+ => (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
// 2 byte store Little endian as in:
// b[0] = byte(v)
&& x0.Uses == 1
&& i1 == i0+1
&& clobber(x0)
- -> (MOVHstore [i0] {s} p w mem)
+ => (MOVHstore [i0] {s} p w mem)
// 4 byte store Little endian as in:
// b[0:1] = uint16(v)
&& x0.Uses == 1
&& i1 == i0+2
&& clobber(x0)
- -> (MOVWstore [i0] {s} p w mem)
+ => (MOVWstore [i0] {s} p w mem)
// 4 byte store Big endian as in:
// b[0] = byte(v >> 24)
&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
&& i1 == i0+1 && i2 == i0+2 && i3 == i0+3
&& clobber(x0, x1, x2)
- -> (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+ => (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
// The 2 byte store appears after the 4 byte store so that the
// match for the 2 byte store is not done first.
&& x0.Uses == 1
&& i1 == i0+1
&& clobber(x0)
- -> (MOVHBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+ => (MOVHBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
// 8 byte store Little endian as in:
// b[0] = byte(v)
&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
&& i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
&& clobber(x0, x1, x2, x3)
- -> (MOVDstore [i0] {s} p w mem)
+ => (MOVDstore [i0] {s} p w mem)
// 8 byte store Big endian as in:
// b[0] = byte(v >> 56)
&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1
&& i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
&& clobber(x0, x1, x2, x3, x4, x5, x6)
- -> (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+ => (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
v.Op = OpPPC64ADD
return true
case OpAddr:
- v.Op = OpPPC64MOVDaddr
- return true
+ return rewriteValuePPC64_OpAddr(v)
case OpAnd16:
v.Op = OpPPC64AND
return true
case OpCondSelect:
return rewriteValuePPC64_OpCondSelect(v)
case OpConst16:
- v.Op = OpPPC64MOVDconst
- return true
+ return rewriteValuePPC64_OpConst16(v)
case OpConst32:
- v.Op = OpPPC64MOVDconst
- return true
+ return rewriteValuePPC64_OpConst32(v)
case OpConst32F:
v.Op = OpPPC64FMOVSconst
return true
case OpConst64:
- v.Op = OpPPC64MOVDconst
- return true
+ return rewriteValuePPC64_OpConst64(v)
case OpConst64F:
v.Op = OpPPC64FMOVDconst
return true
case OpConst8:
- v.Op = OpPPC64MOVDconst
- return true
+ return rewriteValuePPC64_OpConst8(v)
case OpConstBool:
- v.Op = OpPPC64MOVDconst
- return true
+ return rewriteValuePPC64_OpConstBool(v)
case OpConstNil:
return rewriteValuePPC64_OpConstNil(v)
case OpCopysign:
case OpDiv16u:
return rewriteValuePPC64_OpDiv16u(v)
case OpDiv32:
- v.Op = OpPPC64DIVW
- return true
+ return rewriteValuePPC64_OpDiv32(v)
case OpDiv32F:
v.Op = OpPPC64FDIVS
return true
v.Op = OpPPC64DIVWU
return true
case OpDiv64:
- v.Op = OpPPC64DIVD
- return true
+ return rewriteValuePPC64_OpDiv64(v)
case OpDiv64F:
v.Op = OpPPC64FDIV
return true
}
return false
}
+func rewriteValuePPC64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVDaddr {sym} [0] base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpPPC64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
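// The auxToSym/symToAux and *ToAuxInt helpers used below belong to the
// typed-aux scheme: thin, type-checked conversions instead of raw AuxInt
// assignments. Roughly (a sketch):
//	func int32ToAuxInt(i int32) int64 { return int64(i) }
//	func auxIntToBool(i int64) bool   { return i != 0 }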
func rewriteValuePPC64_OpAtomicCompareAndSwap32(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
new_ := v_2
mem := v_3
v.reset(OpPPC64LoweredAtomicCas32)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v.AddArg4(ptr, old, new_, mem)
return true
}
new_ := v_2
mem := v_3
v.reset(OpPPC64LoweredAtomicCas64)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v.AddArg4(ptr, old, new_, mem)
return true
}
new_ := v_2
mem := v_3
v.reset(OpPPC64LoweredAtomicCas32)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
v.AddArg4(ptr, old, new_, mem)
return true
}
ptr := v_0
mem := v_1
v.reset(OpPPC64LoweredAtomicLoad32)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v.AddArg2(ptr, mem)
return true
}
ptr := v_0
mem := v_1
v.reset(OpPPC64LoweredAtomicLoad64)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v.AddArg2(ptr, mem)
return true
}
ptr := v_0
mem := v_1
v.reset(OpPPC64LoweredAtomicLoad8)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v.AddArg2(ptr, mem)
return true
}
ptr := v_0
mem := v_1
v.reset(OpPPC64LoweredAtomicLoad32)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
v.AddArg2(ptr, mem)
return true
}
ptr := v_0
mem := v_1
v.reset(OpPPC64LoweredAtomicLoadPtr)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v.AddArg2(ptr, mem)
return true
}
val := v_1
mem := v_2
v.reset(OpPPC64LoweredAtomicStore32)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v.AddArg3(ptr, val, mem)
return true
}
val := v_1
mem := v_2
v.reset(OpPPC64LoweredAtomicStore64)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v.AddArg3(ptr, val, mem)
return true
}
val := v_1
mem := v_2
v.reset(OpPPC64LoweredAtomicStore8)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v.AddArg3(ptr, val, mem)
return true
}
val := v_1
mem := v_2
v.reset(OpPPC64LoweredAtomicStore32)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
v.AddArg3(ptr, val, mem)
return true
}
y := v_1
v.reset(OpPPC64ADD)
v0 := b.NewValue0(v.Pos, OpPPC64SRDconst, t)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpPPC64SUB, t)
v1.AddArg2(x, y)
v0.AddArg(v1)
x := v_0
v.reset(OpPPC64SUB)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 32
+ v0.AuxInt = int64ToAuxInt(32)
v1 := b.NewValue0(v.Pos, OpPPC64CNTLZW, typ.Int)
v1.AddArg(x)
v.AddArg2(v0, v1)
x := v_0
v.reset(OpPPC64SUB)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 64
+ v0.AuxInt = int64ToAuxInt(64)
v1 := b.NewValue0(v.Pos, OpPPC64CNTLZD, typ.Int)
v1.AddArg(x)
v.AddArg2(v0, v1)
break
}
v.reset(OpPPC64ISEL)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v.AddArg3(x, y, bool)
return true
}
break
}
v.reset(OpPPC64ISEL)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v0.AddArg(bool)
v.AddArg3(x, y, v0)
return true
}
return false
}
+func rewriteValuePPC64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [b])
+ // result: (MOVDconst [b2i(b)])
+ for {
+ b := auxIntToBool(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(b))
+ return true
+ }
+}
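+// b2i is the bool-to-int helper from rewrite.go (1 for true, 0 for
+// false), so ConstBool lowers to a MOVDconst of 0 or 1.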
func rewriteValuePPC64_OpConstNil(v *Value) bool {
// match: (ConstNil)
// result: (MOVDconst [0])
for {
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
}
v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int16)
v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int16)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v2.AddArg(x)
v1.AddArg2(v2, x)
v0.AddArg(v1)
v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int)
v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v2.AddArg(x)
v1.AddArg2(v2, x)
v0.AddArg(v1)
v.reset(OpPPC64POPCNTD)
v0 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int64)
v1 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v1.AddArg(x)
v0.AddArg2(v1, x)
v.AddArg(v0)
v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.UInt8)
v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.UInt8)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v2.AddArg(x)
v1.AddArg2(v2, x)
v0.AddArg(v1)
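+	// The Ctz lowerings above use the popcount identity
+	// ctz(x) = popcount((x-1) &^ x): ANDN(ADDconst [-1] x, x) builds a
+	// word whose set bits are exactly the trailing zeros of x (all ones
+	// when x is 0, giving the full width).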
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (Div16 x y)
+ // match: (Div16 [false] x y)
// result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
x := v_0
y := v_1
v.reset(OpPPC64DIVW)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValuePPC64_OpDiv16u(v *Value) bool {
v_1 := v.Args[1]
return true
}
}
+func rewriteValuePPC64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div32 [false] x y)
+ // result: (DIVW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 [false] x y)
+ // result: (DIVD x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
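+// Div16/Div32/Div64 now carry a bool AuxInt (presumably the "division
+// needs fix-up" flag from the generic ops); only the [false] form
+// lowers directly to DIVW/DIVD, and anything else falls through via
+// "return false".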
func rewriteValuePPC64_OpDiv8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
x := v_0
y := v_1
v.reset(OpPPC64ANDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64)
v0.AddArg2(x, y)
v.AddArg(v0)
ptr := v_0
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v0.AddArg(ptr)
v.AddArg(v0)
return true
// match: (LocalAddr {sym} base _)
// result: (MOVDaddr {sym} base)
for {
- sym := v.Aux
+ sym := auxToSym(v.Aux)
base := v_0
v.reset(OpPPC64MOVDaddr)
- v.Aux = sym
+ v.Aux = symToAux(sym)
v.AddArg(base)
return true
}
y := v_1
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 16
+ v4.AuxInt = int64ToAuxInt(16)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 16) {
break
}
v.reset(OpPPC64SLWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
y := v_1
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 16
+ v3.AuxInt = int64ToAuxInt(16)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 16) {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (Lsh16x64 x (MOVDconst [c]))
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) < 16) {
break
}
v.reset(OpPPC64SLWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
y := v_1
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 16
+ v3.AuxInt = int64ToAuxInt(16)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 16
+ v4.AuxInt = int64ToAuxInt(16)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 32
+ v4.AuxInt = int64ToAuxInt(32)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 32) {
break
}
v.reset(OpPPC64SLWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
y := v_1
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 32
+ v3.AuxInt = int64ToAuxInt(32)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 32) {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (Lsh32x64 x (MOVDconst [c]))
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) < 32) {
break
}
v.reset(OpPPC64SLWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
v_1_1 := v_1.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
y := v_1_0
- if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 31 {
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 31 {
continue
}
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32)
- v0.AuxInt = 31
+ v0.AuxInt = int64ToAuxInt(31)
v0.AddArg(y)
v.AddArg2(x, v0)
return true
// result: (SLW x (ANDconst <typ.Int32> [31] y))
for {
x := v_0
- if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.Int32 || v_1.AuxInt != 31 {
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.Int32 || auxIntToInt64(v_1.AuxInt) != 31 {
break
}
y := v_1.Args[0]
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32)
- v0.AuxInt = 31
+ v0.AuxInt = int64ToAuxInt(31)
v0.AddArg(y)
v.AddArg2(x, v0)
return true
y := v_1
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 32
+ v3.AuxInt = int64ToAuxInt(32)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 32
+ v4.AuxInt = int64ToAuxInt(32)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SLD)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 64
+ v4.AuxInt = int64ToAuxInt(64)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 64) {
break
}
v.reset(OpPPC64SLDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
y := v_1
v.reset(OpPPC64SLD)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 64) {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (Lsh64x64 x (MOVDconst [c]))
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) < 64) {
break
}
v.reset(OpPPC64SLDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
v_1_1 := v_1.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
y := v_1_0
- if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 63 {
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 63 {
continue
}
v.reset(OpPPC64SLD)
v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64)
- v0.AuxInt = 63
+ v0.AuxInt = int64ToAuxInt(63)
v0.AddArg(y)
v.AddArg2(x, v0)
return true
// result: (SLD x (ANDconst <typ.Int64> [63] y))
for {
x := v_0
- if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.Int64 || v_1.AuxInt != 63 {
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.Int64 || auxIntToInt64(v_1.AuxInt) != 63 {
break
}
y := v_1.Args[0]
v.reset(OpPPC64SLD)
v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64)
- v0.AuxInt = 63
+ v0.AuxInt = int64ToAuxInt(63)
v0.AddArg(y)
v.AddArg2(x, v0)
return true
y := v_1
v.reset(OpPPC64SLD)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SLD)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 64
+ v4.AuxInt = int64ToAuxInt(64)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 8
+ v4.AuxInt = int64ToAuxInt(8)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 8) {
break
}
v.reset(OpPPC64SLWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
y := v_1
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 8
+ v3.AuxInt = int64ToAuxInt(8)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 8) {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (Lsh8x64 x (MOVDconst [c]))
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) < 8) {
break
}
v.reset(OpPPC64SLWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
y := v_1
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 8
+ v3.AuxInt = int64ToAuxInt(8)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SLW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 8
+ v4.AuxInt = int64ToAuxInt(8)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
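+	// The variable-count shift lowerings above share one clamp idiom:
+	// ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [width])) yields y
+	// when y < width and -1 otherwise, and SLW/SLD of such an
+	// out-of-range count produces 0, matching Go's shift semantics.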
// match: (Move [0] _ _ mem)
// result: mem
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
mem := v_2
// match: (Move [1] dst src mem)
// result: (MOVBstore dst (MOVBZload src mem) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt64(v.AuxInt) != 1 {
break
}
dst := v_0
// match: (Move [2] dst src mem)
// result: (MOVHstore dst (MOVHZload src mem) mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
dst := v_0
// match: (Move [4] dst src mem)
// result: (MOVWstore dst (MOVWZload src mem) mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
dst := v_0
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVDstore dst (MOVDload src mem) mem)
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
// match: (Move [8] dst src mem)
// result: (MOVWstore [4] dst (MOVWZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpPPC64MOVWstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
- v0.AuxInt = 4
+ v0.AuxInt = int32ToAuxInt(4)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
// match: (Move [3] dst src mem)
// result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
for {
- if v.AuxInt != 3 {
+ if auxIntToInt64(v.AuxInt) != 3 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpPPC64MOVBstore)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
- v0.AuxInt = 2
+ v0.AuxInt = int32ToAuxInt(2)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpPPC64MOVHload, typ.Int16)
// match: (Move [5] dst src mem)
// result: (MOVBstore [4] dst (MOVBZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
for {
- if v.AuxInt != 5 {
+ if auxIntToInt64(v.AuxInt) != 5 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpPPC64MOVBstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
- v0.AuxInt = 4
+ v0.AuxInt = int32ToAuxInt(4)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
// match: (Move [6] dst src mem)
// result: (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
for {
- if v.AuxInt != 6 {
+ if auxIntToInt64(v.AuxInt) != 6 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpPPC64MOVHstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
- v0.AuxInt = 4
+ v0.AuxInt = int32ToAuxInt(4)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
// match: (Move [7] dst src mem)
// result: (MOVBstore [6] dst (MOVBZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)))
for {
- if v.AuxInt != 7 {
+ if auxIntToInt64(v.AuxInt) != 7 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpPPC64MOVBstore)
- v.AuxInt = 6
+ v.AuxInt = int32ToAuxInt(6)
v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
- v0.AuxInt = 6
+ v0.AuxInt = int32ToAuxInt(6)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem)
- v1.AuxInt = 4
+ v1.AuxInt = int32ToAuxInt(4)
v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
- v2.AuxInt = 4
+ v2.AuxInt = int32ToAuxInt(4)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
v4 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
// cond: s > 8 && objabi.GOPPC64 <= 8 && logLargeCopy(v, s)
// result: (LoweredMove [s] dst src mem)
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
break
}
v.reset(OpPPC64LoweredMove)
- v.AuxInt = s
+ v.AuxInt = int64ToAuxInt(s)
v.AddArg3(dst, src, mem)
return true
}
// cond: s > 8 && s <= 64 && objabi.GOPPC64 >= 9
// result: (LoweredQuadMoveShort [s] dst src mem)
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
break
}
v.reset(OpPPC64LoweredQuadMoveShort)
- v.AuxInt = s
+ v.AuxInt = int64ToAuxInt(s)
v.AddArg3(dst, src, mem)
return true
}
// cond: s > 8 && objabi.GOPPC64 >= 9 && logLargeCopy(v, s)
// result: (LoweredQuadMove [s] dst src mem)
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
break
}
v.reset(OpPPC64LoweredQuadMove)
- v.AuxInt = s
+ v.AuxInt = int64ToAuxInt(s)
v.AddArg3(dst, src, mem)
return true
}
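+	// Larger moves dispatch on the target ISA level: GOPPC64 <= 8 uses
+	// LoweredMove, while GOPPC64 >= 9 prefers the quad-word variants
+	// (LoweredQuadMoveShort up to 64 bytes, LoweredQuadMove beyond),
+	// with logLargeCopy noting the truly large copies.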
for {
x := v_0
v.reset(OpPPC64XORconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v.AddArg(x)
return true
}
// match: (OffPtr [off] ptr)
// result: (ADD (MOVDconst <typ.Int64> [off]) ptr)
for {
- off := v.AuxInt
+ off := auxIntToInt64(v.AuxInt)
ptr := v_0
v.reset(OpPPC64ADD)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = off
+ v0.AuxInt = int64ToAuxInt(off)
v.AddArg2(v0, ptr)
return true
}
if v_0.Op != OpPPC64SLDconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpPPC64SRDconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 64-c) {
continue
}
v.reset(OpPPC64ROTLconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SLWconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpPPC64SRWconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 32-c) {
continue
}
v.reset(OpPPC64ROTLWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
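+	// Rotate recognition: a left shift by c OR'd with a right shift by
+	// 64-c (32-c for the word form) of the same operand is a rotate, so
+	// the pair collapses to ROTLconst/ROTLWconst [c].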
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || v_0_1.AuxInt != 63 {
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
}
_ = v_1_1.Args[1]
v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 64 {
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 {
continue
}
v_1_1_1 := v_1_1.Args[1]
- if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 63 || y != v_1_1_1.Args[0] {
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 63 || y != v_1_1_1.Args[0] {
continue
}
v.reset(OpPPC64ROTL)
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || v_0_1.AuxInt != 31 {
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
}
_ = v_1_1.Args[1]
v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 32 {
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 {
continue
}
v_1_1_1 := v_1_1.Args[1]
- if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 31 || y != v_1_1_1.Args[0] {
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 31 || y != v_1_1_1.Args[0] {
continue
}
v.reset(OpPPC64ROTLW)
if v_1.Op != OpPPC64MOVDconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(is32Bit(c)) {
continue
}
v.reset(OpPPC64ADDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// cond: is32Bit(c+d)
// result: (ADDconst [c+d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64ADDconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(c + d)) {
break
}
v.reset(OpPPC64ADDconst)
- v.AuxInt = c + d
+ v.AuxInt = int64ToAuxInt(c + d)
v.AddArg(x)
return true
}
// match: (ADDconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
x := v_0
return true
}
// match: (ADDconst [c] (MOVDaddr [d] {sym} x))
- // result: (MOVDaddr [c+d] {sym} x)
+ // cond: is32Bit(c+int64(d))
+ // result: (MOVDaddr [int32(c+int64(d))] {sym} x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVDaddr {
break
}
- d := v_0.AuxInt
- sym := v_0.Aux
+ d := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
x := v_0.Args[0]
+ if !(is32Bit(c + int64(d))) {
+ break
+ }
v.reset(OpPPC64MOVDaddr)
- v.AuxInt = c + d
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(int32(c + int64(d)))
+ v.Aux = symToAux(sym)
v.AddArg(x)
return true
}
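+// The MOVDaddr fold now needs an explicit is32Bit guard: ADDconst's
+// AuxInt is an int64 while MOVDaddr's is an int32, so the sum is
+// range-checked before being narrowed with int32(c+int64(d)).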
func rewriteValuePPC64_OpPPC64ADDconstForCarry(v *Value) bool {
v_0 := v.Args[0]
// match: (ADDconstForCarry [c] (MOVDconst [d]))
- // cond: int64(int16(c)) < 0 && (int64(int16(c)) < 0 || int64(int16(c)) + d >= 0)
+ // cond: c < 0 && (int64(c) < 0 || int64(c) + d >= 0)
// result: (FlagCarryClear)
for {
- c := v.AuxInt
+ c := auxIntToInt16(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- d := v_0.AuxInt
- if !(int64(int16(c)) < 0 && (int64(int16(c)) < 0 || int64(int16(c))+d >= 0)) {
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(c < 0 && (int64(c) < 0 || int64(c)+d >= 0)) {
break
}
v.reset(OpPPC64FlagCarryClear)
return true
}
// match: (ADDconstForCarry [c] (MOVDconst [d]))
- // cond: int64(int16(c)) < 0 && int64(int16(c)) >= 0 && int64(int16(c)) + d < 0
+ // cond: c < 0 && int64(c) >= 0 && int64(c) + d < 0
// result: (FlagCarrySet)
for {
- c := v.AuxInt
+ c := auxIntToInt16(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- d := v_0.AuxInt
- if !(int64(int16(c)) < 0 && int64(int16(c)) >= 0 && int64(int16(c))+d < 0) {
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(c < 0 && int64(c) >= 0 && int64(c)+d < 0) {
break
}
v.reset(OpPPC64FlagCarrySet)
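+	// Note: the FlagCarrySet arm above is unreachable as written (with
+	// c an int16, "c < 0" and "int64(c) >= 0" cannot both hold); the
+	// condition is a faithful transliteration of the old untyped rule.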
if v_0.Op != OpPPC64MOVDconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpPPC64MOVDconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = c & d
+ v.AuxInt = int64ToAuxInt(c & d)
return true
}
break
if v_1.Op != OpPPC64MOVDconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(isU16Bit(c)) {
continue
}
v.reset(OpPPC64ANDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64MOVDconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
y := v_1
if y.Op != OpPPC64MOVWZreg || !(c&0xFFFFFFFF == 0xFFFFFFFF) {
continue
// result: (MOVWZreg x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 0xFFFFFFFF {
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0xFFFFFFFF {
continue
}
y := v_1
if v_0.Op != OpPPC64MOVDconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
if x.Op != OpPPC64MOVBZload {
continue
}
v.reset(OpPPC64ANDconst)
- v.AuxInt = c & 0xFF
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpPPC64MOVDconst {
break
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = c &^ d
+ v.AuxInt = int64ToAuxInt(c &^ d)
return true
}
return false
// match: (ANDconst [c] (ANDconst [d] x))
// result: (ANDconst [c&d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64ANDconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpPPC64ANDconst)
- v.AuxInt = c & d
+ v.AuxInt = int64ToAuxInt(c & d)
v.AddArg(x)
return true
}
// match: (ANDconst [-1] x)
// result: x
for {
- if v.AuxInt != -1 {
+ if auxIntToInt64(v.AuxInt) != -1 {
break
}
x := v_0
// match: (ANDconst [0] _)
// result: (MOVDconst [0])
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (ANDconst [c] y:(MOVBZreg _))
// cond: c&0xFF == 0xFF
// result: y
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
y := v_0
if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) {
break
// match: (ANDconst [0xFF] y:(MOVBreg _))
// result: y
for {
- if v.AuxInt != 0xFF {
+ if auxIntToInt64(v.AuxInt) != 0xFF {
break
}
y := v_0
// cond: c&0xFFFF == 0xFFFF
// result: y
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
y := v_0
if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) {
break
// match: (ANDconst [0xFFFF] y:(MOVHreg _))
// result: y
for {
- if v.AuxInt != 0xFFFF {
+ if auxIntToInt64(v.AuxInt) != 0xFFFF {
break
}
y := v_0
// match: (ANDconst [c] (MOVBreg x))
// result: (ANDconst [c&0xFF] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVBreg {
break
}
x := v_0.Args[0]
v.reset(OpPPC64ANDconst)
- v.AuxInt = c & 0xFF
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
v.AddArg(x)
return true
}
// match: (ANDconst [c] (MOVBZreg x))
// result: (ANDconst [c&0xFF] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVBZreg {
break
}
x := v_0.Args[0]
v.reset(OpPPC64ANDconst)
- v.AuxInt = c & 0xFF
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
v.AddArg(x)
return true
}
// match: (ANDconst [c] (MOVHreg x))
// result: (ANDconst [c&0xFFFF] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVHreg {
break
}
x := v_0.Args[0]
v.reset(OpPPC64ANDconst)
- v.AuxInt = c & 0xFFFF
+ v.AuxInt = int64ToAuxInt(c & 0xFFFF)
v.AddArg(x)
return true
}
// match: (ANDconst [c] (MOVHZreg x))
// result: (ANDconst [c&0xFFFF] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVHZreg {
break
}
x := v_0.Args[0]
v.reset(OpPPC64ANDconst)
- v.AuxInt = c & 0xFFFF
+ v.AuxInt = int64ToAuxInt(c & 0xFFFF)
v.AddArg(x)
return true
}
// match: (ANDconst [c] (MOVWreg x))
// result: (ANDconst [c&0xFFFFFFFF] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVWreg {
break
}
x := v_0.Args[0]
v.reset(OpPPC64ANDconst)
- v.AuxInt = c & 0xFFFFFFFF
+ v.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
v.AddArg(x)
return true
}
// match: (ANDconst [c] (MOVWZreg x))
// result: (ANDconst [c&0xFFFFFFFF] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVWZreg {
break
}
x := v_0.Args[0]
v.reset(OpPPC64ANDconst)
- v.AuxInt = c & 0xFFFFFFFF
+ v.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
v.AddArg(x)
return true
}
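+	// ANDconst folding through sign/zero extensions: the inner MOV*reg
+	// is dropped and the constant mask is narrowed to the width of the
+	// extension (0xFF, 0xFFFF, 0xFFFFFFFF).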
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64CMPconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
y := v_1
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64InvertFlags)
v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(y)
v.AddArg(v0)
return true
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(isU16Bit(c)) {
break
}
v.reset(OpPPC64CMPUconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
y := v_1
if !(isU16Bit(c)) {
break
}
v.reset(OpPPC64InvertFlags)
v0 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(y)
v.AddArg(v0)
return true
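+	// CMP/CMPU canonicalization: a 16-bit constant operand moves into
+	// the immediate slot of CMPconst/CMPUconst; when the constant was
+	// on the left, the comparison is rebuilt swapped and wrapped in
+	// InvertFlags to preserve its direction.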
// cond: x==y
// result: (FlagEQ)
for {
- y := v.AuxInt
+ y := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if !(x == y) {
break
}
// cond: uint64(x)<uint64(y)
// result: (FlagLT)
for {
- y := v.AuxInt
+ y := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if !(uint64(x) < uint64(y)) {
break
}
// cond: uint64(x)>uint64(y)
// result: (FlagGT)
for {
- y := v.AuxInt
+ y := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if !(uint64(x) > uint64(y)) {
break
}
}
// match: (CMPW x (MOVDconst [c]))
// cond: is16Bit(c)
- // result: (CMPWconst x [c])
+ // result: (CMPWconst x [int32(c)])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64CMPWconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg(x)
return true
}
// match: (CMPW (MOVDconst [c]) y)
// cond: is16Bit(c)
- // result: (InvertFlags (CMPWconst y [c]))
+ // result: (InvertFlags (CMPWconst y [int32(c)]))
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
y := v_1
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64InvertFlags)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int32ToAuxInt(int32(c))
v0.AddArg(y)
v.AddArg(v0)
return true
}
// match: (CMPWU x (MOVDconst [c]))
// cond: isU16Bit(c)
- // result: (CMPWUconst x [c])
+ // result: (CMPWUconst x [int32(c)])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(isU16Bit(c)) {
break
}
v.reset(OpPPC64CMPWUconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg(x)
return true
}
// match: (CMPWU (MOVDconst [c]) y)
// cond: isU16Bit(c)
- // result: (InvertFlags (CMPWUconst y [c]))
+ // result: (InvertFlags (CMPWUconst y [int32(c)]))
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
y := v_1
if !(isU16Bit(c)) {
break
}
v.reset(OpPPC64InvertFlags)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int32ToAuxInt(int32(c))
v0.AddArg(y)
v.AddArg(v0)
return true
// cond: int32(x)==int32(y)
// result: (FlagEQ)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if !(int32(x) == int32(y)) {
break
}
// cond: uint32(x)<uint32(y)
// result: (FlagLT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if !(uint32(x) < uint32(y)) {
break
}
// cond: uint32(x)>uint32(y)
// result: (FlagGT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if !(uint32(x) > uint32(y)) {
break
}
// cond: int32(x)==int32(y)
// result: (FlagEQ)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if !(int32(x) == int32(y)) {
break
}
// cond: int32(x)<int32(y)
// result: (FlagLT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if !(int32(x) < int32(y)) {
break
}
// cond: int32(x)>int32(y)
// result: (FlagGT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if !(int32(x) > int32(y)) {
break
}
// cond: x==y
// result: (FlagEQ)
for {
- y := v.AuxInt
+ y := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if !(x == y) {
break
}
// cond: x<y
// result: (FlagLT)
for {
- y := v.AuxInt
+ y := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if !(x < y) {
break
}
// cond: x>y
// result: (FlagGT)
for {
- y := v.AuxInt
+ y := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToInt64(v_0.AuxInt)
if !(x > y) {
break
}
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (Equal (FlagLT))
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (Equal (FlagGT))
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (Equal (InvertFlags x))
for {
cmp := v_0
v.reset(OpPPC64ISELB)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, cmp)
return true
}
func rewriteValuePPC64_OpPPC64FABS(v *Value) bool {
v_0 := v.Args[0]
// match: (FABS (FMOVDconst [x]))
- // result: (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
+ // result: (FMOVDconst [math.Abs(x)])
for {
if v_0.Op != OpPPC64FMOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToFloat64(v_0.AuxInt)
v.reset(OpPPC64FMOVDconst)
- v.AuxInt = auxFrom64F(math.Abs(auxTo64F(x)))
+ v.AuxInt = float64ToAuxInt(math.Abs(x))
return true
}
return false
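+// The float constant folds now operate on the float64 value itself:
+// auxIntToFloat64/float64ToAuxInt replace the old auxTo64F/auxFrom64F
+// round trip through the raw bit pattern.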
func rewriteValuePPC64_OpPPC64FCEIL(v *Value) bool {
v_0 := v.Args[0]
// match: (FCEIL (FMOVDconst [x]))
- // result: (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
+ // result: (FMOVDconst [math.Ceil(x)])
for {
if v_0.Op != OpPPC64FMOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToFloat64(v_0.AuxInt)
v.reset(OpPPC64FMOVDconst)
- v.AuxInt = auxFrom64F(math.Ceil(auxTo64F(x)))
+ v.AuxInt = float64ToAuxInt(math.Ceil(x))
return true
}
return false
func rewriteValuePPC64_OpPPC64FFLOOR(v *Value) bool {
v_0 := v.Args[0]
// match: (FFLOOR (FMOVDconst [x]))
- // result: (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
+ // result: (FMOVDconst [math.Floor(x)])
for {
if v_0.Op != OpPPC64FMOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToFloat64(v_0.AuxInt)
v.reset(OpPPC64FMOVDconst)
- v.AuxInt = auxFrom64F(math.Floor(auxTo64F(x)))
+ v.AuxInt = float64ToAuxInt(math.Floor(x))
return true
}
return false
for {
cmp := v_0
v.reset(OpPPC64ISEL)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpPPC64ISELB, typ.Int32)
- v1.AuxInt = 1
+ v1.AuxInt = int32ToAuxInt(1)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = 1
+ v2.AuxInt = int64ToAuxInt(1)
v1.AddArg2(v2, cmp)
v.AddArg3(v0, v1, cmp)
return true
for {
cmp := v_0
v.reset(OpPPC64ISELB)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, cmp)
return true
}
for {
cmp := v_0
v.reset(OpPPC64ISEL)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpPPC64ISELB, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = 1
+ v2.AuxInt = int64ToAuxInt(1)
v1.AddArg2(v2, cmp)
v.AddArg3(v0, v1, cmp)
return true
for {
cmp := v_0
v.reset(OpPPC64ISELB)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, cmp)
return true
}
// match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _))
// result: (MTVSRD x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpPPC64MOVDstore || v_1.AuxInt != off || v_1.Aux != sym {
+ if v_1.Op != OpPPC64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
}
// match: (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
- // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (FMOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64FMOVDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is16Bit(off1+off2)
- // result: (FMOVDload [off1+off2] {sym} ptr mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (FMOVDload [off1+int32(off2)] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64FMOVDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (FMOVDstore [off] {sym} ptr (MTVSRD x) mem)
// result: (MOVDstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MTVSRD {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVDstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
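+	// Store-to-load forwarding across register files: an FMOVDload that
+	// reads back a just-written MOVDstore becomes a direct GPR->FPR
+	// move (MTVSRD), and an FMOVDstore of an MTVSRD result becomes a
+	// plain MOVDstore, skipping the memory round trip in both
+	// directions.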
// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is16Bit(off1+off2)
- // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64FMOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
- // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // result: (FMOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
val := v_1
mem := v_2
break
}
v.reset(OpPPC64FMOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
v_0 := v.Args[0]
// match: (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
- // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (FMOVSload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64FMOVSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is16Bit(off1+off2)
- // result: (FMOVSload [off1+off2] {sym} ptr mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (FMOVSload [off1+int32(off2)] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64FMOVSload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is16Bit(off1+off2)
- // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64FMOVSstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
- // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // result: (FMOVSstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
val := v_1
mem := v_2
break
}
v.reset(OpPPC64FMOVSstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
func rewriteValuePPC64_OpPPC64FSQRT(v *Value) bool {
v_0 := v.Args[0]
// match: (FSQRT (FMOVDconst [x]))
- // cond: auxTo64F(x) >= 0
- // result: (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
+ // cond: x >= 0
+ // result: (FMOVDconst [math.Sqrt(x)])
for {
if v_0.Op != OpPPC64FMOVDconst {
break
}
- x := v_0.AuxInt
- if !(auxTo64F(x) >= 0) {
+ x := auxIntToFloat64(v_0.AuxInt)
+ if !(x >= 0) {
break
}
v.reset(OpPPC64FMOVDconst)
- v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(x)))
+ v.AuxInt = float64ToAuxInt(math.Sqrt(x))
return true
}
return false
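+// The x >= 0 guard keeps the FSQRT fold away from negative inputs and
+// NaNs (comparisons involving NaN are false), presumably leaving those
+// cases to the hardware instruction at run time.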
func rewriteValuePPC64_OpPPC64FTRUNC(v *Value) bool {
v_0 := v.Args[0]
// match: (FTRUNC (FMOVDconst [x]))
- // result: (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])
+ // result: (FMOVDconst [math.Trunc(x)])
for {
if v_0.Op != OpPPC64FMOVDconst {
break
}
- x := v_0.AuxInt
+ x := auxIntToFloat64(v_0.AuxInt)
v.reset(OpPPC64FMOVDconst)
- v.AuxInt = auxFrom64F(math.Trunc(auxTo64F(x)))
+ v.AuxInt = float64ToAuxInt(math.Trunc(x))
return true
}
return false
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (GreaterEqual (FlagLT))
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (GreaterEqual (FlagGT))
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (GreaterEqual (InvertFlags x))
for {
cmp := v_0
v.reset(OpPPC64ISELB)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, cmp)
return true
}
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (GreaterThan (FlagLT))
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (GreaterThan (FlagGT))
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (GreaterThan (InvertFlags x))
for {
cmp := v_0
v.reset(OpPPC64ISELB)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, cmp)
return true
}
// cond: c >= d
// result: (ANDconst [d] y)
for {
- if v.AuxInt != 0 || v_0.Op != OpPPC64ANDconst {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64ANDconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
y := v_0.Args[0]
- if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != -1 || v_2.Op != OpPPC64CMPU {
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 || v_2.Op != OpPPC64CMPU {
break
}
_ = v_2.Args[1]
v_2_0 := v_2.Args[0]
- if v_2_0.Op != OpPPC64ANDconst || v_2_0.AuxInt != d || y != v_2_0.Args[0] {
+ if v_2_0.Op != OpPPC64ANDconst || auxIntToInt64(v_2_0.AuxInt) != d || y != v_2_0.Args[0] {
break
}
v_2_1 := v_2.Args[1]
if v_2_1.Op != OpPPC64MOVDconst {
break
}
- c := v_2_1.AuxInt
+ c := auxIntToInt64(v_2_1.AuxInt)
if !(c >= d) {
break
}
v.reset(OpPPC64ANDconst)
- v.AuxInt = d
+ v.AuxInt = int64ToAuxInt(d)
v.AddArg(y)
return true
}
// cond: c >= d
// result: (ANDconst [d] y)
for {
- if v.AuxInt != 0 || v_0.Op != OpPPC64ANDconst {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64ANDconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
y := v_0.Args[0]
- if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != -1 || v_2.Op != OpPPC64CMPUconst {
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 || v_2.Op != OpPPC64CMPUconst {
break
}
- c := v_2.AuxInt
+ c := auxIntToInt64(v_2.AuxInt)
v_2_0 := v_2.Args[0]
- if v_2_0.Op != OpPPC64ANDconst || v_2_0.AuxInt != d || y != v_2_0.Args[0] || !(c >= d) {
+ if v_2_0.Op != OpPPC64ANDconst || auxIntToInt64(v_2_0.AuxInt) != d || y != v_2_0.Args[0] || !(c >= d) {
break
}
v.reset(OpPPC64ANDconst)
- v.AuxInt = d
+ v.AuxInt = int64ToAuxInt(d)
v.AddArg(y)
return true
}
// match: (ISEL [2] x _ (FlagEQ))
// result: x
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
x := v_0
// match: (ISEL [2] _ y (FlagLT))
// result: y
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
y := v_1
// match: (ISEL [2] _ y (FlagGT))
// result: y
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
y := v_1
// match: (ISEL [6] _ y (FlagEQ))
// result: y
for {
- if v.AuxInt != 6 {
+ if auxIntToInt32(v.AuxInt) != 6 {
break
}
y := v_1
// match: (ISEL [6] x _ (FlagLT))
// result: x
for {
- if v.AuxInt != 6 {
+ if auxIntToInt32(v.AuxInt) != 6 {
break
}
x := v_0
// match: (ISEL [6] x _ (FlagGT))
// result: x
for {
- if v.AuxInt != 6 {
+ if auxIntToInt32(v.AuxInt) != 6 {
break
}
x := v_0
// match: (ISEL [0] _ y (FlagEQ))
// result: y
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
y := v_1
// match: (ISEL [0] _ y (FlagGT))
// result: y
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
y := v_1
// match: (ISEL [0] x _ (FlagLT))
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
x := v_0
// match: (ISEL [5] _ x (FlagEQ))
// result: x
for {
- if v.AuxInt != 5 {
+ if auxIntToInt32(v.AuxInt) != 5 {
break
}
x := v_1
// match: (ISEL [5] _ x (FlagLT))
// result: x
for {
- if v.AuxInt != 5 {
+ if auxIntToInt32(v.AuxInt) != 5 {
break
}
x := v_1
// match: (ISEL [5] y _ (FlagGT))
// result: y
for {
- if v.AuxInt != 5 {
+ if auxIntToInt32(v.AuxInt) != 5 {
break
}
y := v_0
// match: (ISEL [1] _ y (FlagEQ))
// result: y
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
y := v_1
// match: (ISEL [1] _ y (FlagLT))
// result: y
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
y := v_1
// match: (ISEL [1] x _ (FlagGT))
// result: x
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
x := v_0
// match: (ISEL [4] x _ (FlagEQ))
// result: x
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
x := v_0
// match: (ISEL [4] x _ (FlagGT))
// result: x
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
x := v_0
// match: (ISEL [4] _ y (FlagLT))
// result: y
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
y := v_1
// cond: n%4 == 0
// result: (ISEL [n+1] x y bool)
for {
- n := v.AuxInt
+ n := auxIntToInt32(v.AuxInt)
x := v_0
y := v_1
if v_2.Op != OpPPC64InvertFlags {
break
}
v.reset(OpPPC64ISEL)
- v.AuxInt = n + 1
+ v.AuxInt = int32ToAuxInt(n + 1)
v.AddArg3(x, y, bool)
return true
}
// cond: n%4 == 1
// result: (ISEL [n-1] x y bool)
for {
- n := v.AuxInt
+ n := auxIntToInt32(v.AuxInt)
x := v_0
y := v_1
if v_2.Op != OpPPC64InvertFlags {
break
}
v.reset(OpPPC64ISEL)
- v.AuxInt = n - 1
+ v.AuxInt = int32ToAuxInt(n - 1)
v.AddArg3(x, y, bool)
return true
}
// cond: n%4 == 2
// result: (ISEL [n] x y bool)
for {
- n := v.AuxInt
+ n := auxIntToInt32(v.AuxInt)
x := v_0
y := v_1
if v_2.Op != OpPPC64InvertFlags {
break
}
v.reset(OpPPC64ISEL)
- v.AuxInt = n
+ v.AuxInt = int32ToAuxInt(n)
v.AddArg3(x, y, bool)
return true
}
// match: (ISELB [0] _ (FlagLT))
// result: (MOVDconst [1])
for {
- if v.AuxInt != 0 || v_1.Op != OpPPC64FlagLT {
+ if auxIntToInt32(v.AuxInt) != 0 || v_1.Op != OpPPC64FlagLT {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (ISELB [0] _ (FlagGT))
// result: (MOVDconst [0])
for {
- if v.AuxInt != 0 || v_1.Op != OpPPC64FlagGT {
+ if auxIntToInt32(v.AuxInt) != 0 || v_1.Op != OpPPC64FlagGT {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (ISELB [0] _ (FlagEQ))
// result: (MOVDconst [0])
for {
- if v.AuxInt != 0 || v_1.Op != OpPPC64FlagEQ {
+ if auxIntToInt32(v.AuxInt) != 0 || v_1.Op != OpPPC64FlagEQ {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (ISELB [1] _ (FlagGT))
// result: (MOVDconst [1])
for {
- if v.AuxInt != 1 || v_1.Op != OpPPC64FlagGT {
+ if auxIntToInt32(v.AuxInt) != 1 || v_1.Op != OpPPC64FlagGT {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (ISELB [1] _ (FlagLT))
// result: (MOVDconst [0])
for {
- if v.AuxInt != 1 || v_1.Op != OpPPC64FlagLT {
+ if auxIntToInt32(v.AuxInt) != 1 || v_1.Op != OpPPC64FlagLT {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (ISELB [1] _ (FlagEQ))
// result: (MOVDconst [0])
for {
- if v.AuxInt != 1 || v_1.Op != OpPPC64FlagEQ {
+ if auxIntToInt32(v.AuxInt) != 1 || v_1.Op != OpPPC64FlagEQ {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (ISELB [2] _ (FlagEQ))
// result: (MOVDconst [1])
for {
- if v.AuxInt != 2 || v_1.Op != OpPPC64FlagEQ {
+ if auxIntToInt32(v.AuxInt) != 2 || v_1.Op != OpPPC64FlagEQ {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (ISELB [2] _ (FlagLT))
// result: (MOVDconst [0])
for {
- if v.AuxInt != 2 || v_1.Op != OpPPC64FlagLT {
+ if auxIntToInt32(v.AuxInt) != 2 || v_1.Op != OpPPC64FlagLT {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (ISELB [2] _ (FlagGT))
// result: (MOVDconst [0])
for {
- if v.AuxInt != 2 || v_1.Op != OpPPC64FlagGT {
+ if auxIntToInt32(v.AuxInt) != 2 || v_1.Op != OpPPC64FlagGT {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (ISELB [4] _ (FlagLT))
// result: (MOVDconst [0])
for {
- if v.AuxInt != 4 || v_1.Op != OpPPC64FlagLT {
+ if auxIntToInt32(v.AuxInt) != 4 || v_1.Op != OpPPC64FlagLT {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (ISELB [4] _ (FlagGT))
// result: (MOVDconst [1])
for {
- if v.AuxInt != 4 || v_1.Op != OpPPC64FlagGT {
+ if auxIntToInt32(v.AuxInt) != 4 || v_1.Op != OpPPC64FlagGT {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (ISELB [4] _ (FlagEQ))
// result: (MOVDconst [1])
for {
- if v.AuxInt != 4 || v_1.Op != OpPPC64FlagEQ {
+ if auxIntToInt32(v.AuxInt) != 4 || v_1.Op != OpPPC64FlagEQ {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (ISELB [5] _ (FlagGT))
// result: (MOVDconst [0])
for {
- if v.AuxInt != 5 || v_1.Op != OpPPC64FlagGT {
+ if auxIntToInt32(v.AuxInt) != 5 || v_1.Op != OpPPC64FlagGT {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (ISELB [5] _ (FlagLT))
// result: (MOVDconst [1])
for {
- if v.AuxInt != 5 || v_1.Op != OpPPC64FlagLT {
+ if auxIntToInt32(v.AuxInt) != 5 || v_1.Op != OpPPC64FlagLT {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (ISELB [5] _ (FlagEQ))
// result: (MOVDconst [1])
for {
- if v.AuxInt != 5 || v_1.Op != OpPPC64FlagEQ {
+ if auxIntToInt32(v.AuxInt) != 5 || v_1.Op != OpPPC64FlagEQ {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (ISELB [6] _ (FlagEQ))
// result: (MOVDconst [0])
for {
- if v.AuxInt != 6 || v_1.Op != OpPPC64FlagEQ {
+ if auxIntToInt32(v.AuxInt) != 6 || v_1.Op != OpPPC64FlagEQ {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (ISELB [6] _ (FlagLT))
// result: (MOVDconst [1])
for {
- if v.AuxInt != 6 || v_1.Op != OpPPC64FlagLT {
+ if auxIntToInt32(v.AuxInt) != 6 || v_1.Op != OpPPC64FlagLT {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (ISELB [6] _ (FlagGT))
// result: (MOVDconst [1])
for {
- if v.AuxInt != 6 || v_1.Op != OpPPC64FlagGT {
+ if auxIntToInt32(v.AuxInt) != 6 || v_1.Op != OpPPC64FlagGT {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
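// Reading the constant folds above, the ISELB aux value encodes which CR bit
// selects the value: 0=LT, 1=GT, 2=EQ, and n+4 (4=GE, 5=LE, 6=NE) selects on
// the bit being clear. That is why the InvertFlags rules below adjust only
// n%4: inverting a comparison swaps LT and GT (n%4==0 <-> n%4==1) and leaves
// EQ (n%4==2) alone.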
// match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool))
// cond: n%4 == 0
// result: (ISELB [n+1] (MOVDconst [1]) bool)
for {
- n := v.AuxInt
- if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 1 || v_1.Op != OpPPC64InvertFlags {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpPPC64InvertFlags {
break
}
bool := v_1.Args[0]
if !(n%4 == 0) {
break
}
v.reset(OpPPC64ISELB)
- v.AuxInt = n + 1
+ v.AuxInt = int32ToAuxInt(n + 1)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, bool)
return true
}
// cond: n%4 == 1
// result: (ISELB [n-1] (MOVDconst [1]) bool)
for {
- n := v.AuxInt
- if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 1 || v_1.Op != OpPPC64InvertFlags {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpPPC64InvertFlags {
break
}
bool := v_1.Args[0]
if !(n%4 == 1) {
break
}
v.reset(OpPPC64ISELB)
- v.AuxInt = n - 1
+ v.AuxInt = int32ToAuxInt(n - 1)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, bool)
return true
}
// cond: n%4 == 2
// result: (ISELB [n] (MOVDconst [1]) bool)
for {
- n := v.AuxInt
- if v_0.Op != OpPPC64MOVDconst || v_0.AuxInt != 1 || v_1.Op != OpPPC64InvertFlags {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpPPC64InvertFlags {
break
}
bool := v_1.Args[0]
if !(n%4 == 2) {
break
}
v.reset(OpPPC64ISELB)
- v.AuxInt = n
+ v.AuxInt = int32ToAuxInt(n)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, bool)
return true
}
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (LessEqual (FlagLT))
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (LessEqual (FlagGT))
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (LessEqual (InvertFlags x))
for {
cmp := v_0
v.reset(OpPPC64ISELB)
- v.AuxInt = 5
+ v.AuxInt = int32ToAuxInt(5)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, cmp)
return true
}
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (LessThan (FlagLT))
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (LessThan (FlagGT))
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (LessThan (InvertFlags x))
for {
cmp := v_0
v.reset(OpPPC64ISELB)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, cmp)
return true
}
b := v.Block
typ := &b.Func.Config.Types
// match: (MFVSRD (FMOVDconst [c]))
- // result: (MOVDconst [c])
+ // result: (MOVDconst [int64(math.Float64bits(c))])
for {
if v_0.Op != OpPPC64FMOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToFloat64(v_0.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(int64(math.Float64bits(c)))
return true
}
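// MFVSRD moves a doubleword from a floating-point/vector register to a GPR
// without conversion, so folding it over a float constant must reinterpret
// the bits rather than the value: for example, math.Float64bits(1.0) is
// 0x3ff0000000000000. Under typed aux rules the FMOVDconst payload is a
// float64, which is what forces the explicit Float64bits call above.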
// match: (MFVSRD x:(FMOVDload [off] {sym} ptr mem))
if x.Op != OpPPC64FMOVDload {
break
}
- off := x.AuxInt
- sym := x.Aux
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
mem := x.Args[1]
ptr := x.Args[0]
if !(x.Uses == 1 && clobber(x)) {
b = x.Block
v0 := b.NewValue0(x.Pos, OpPPC64MOVDload, typ.Int64)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
v_0 := v.Args[0]
// match: (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
- // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVBZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVBZload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
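// canMergeSym permits folding a MOVDaddr into the load only when at most one
// of the two symbols is non-nil, so mergeSymTyped simply picks the non-nil
// one; mergeSymTyped is the Sym-typed counterpart of mergeSym used during
// the typed-aux transition.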
// match: (MOVBZload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVBZload [off1+off2] {sym} x mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVBZload [off1+int32(off2)] {sym} x mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64MOVBZload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(x, mem)
return true
}
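// Offsets on loads and stores are int32 aux values, while ADDconst carries an
// int64, so the reassociation above checks for overflow in 64-bit arithmetic
// (is16Bit(int64(off1)+off2)) before truncating with int32(off2). is16Bit is
// the usual D-form displacement test, roughly:
//
//	func is16Bit(n int64) bool { return n == int64(int16(n)) }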
// cond: sym == nil && p.Uses == 1
// result: (MOVBZloadidx ptr idx mem)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
- sym := v.Aux
+ sym := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64ADD {
break
v_0 := v.Args[0]
// match: (MOVBZloadidx ptr (MOVDconst [c]) mem)
// cond: is16Bit(c)
- // result: (MOVBZload [c] ptr mem)
+ // result: (MOVBZload [int32(c)] ptr mem)
for {
ptr := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
mem := v_2
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64MOVBZload)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBZloadidx (MOVDconst [c]) ptr mem)
// cond: is16Bit(c)
- // result: (MOVBZload [c] ptr mem)
+ // result: (MOVBZload [int32(c)] ptr mem)
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
mem := v_2
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64MOVBZload)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(ptr, mem)
return true
}
if y.Op != OpPPC64ANDconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0xFF) {
break
}
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVBZreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(sizeof(x.Type) == 8) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c >= 56) {
break
}
v.reset(OpPPC64SRDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c >= 24) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = int64(uint8(c))
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
return true
}
return false
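// In short, MOVBZreg is dropped when the input provably fits in 8 bits
// (masked, or already shifted down far enough), and folds through constants
// by zero-extending the low byte: MOVBZreg of MOVDconst [0x1234] becomes
// MOVDconst [0x34].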
if y.Op != OpPPC64ANDconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0x7F) {
break
}
if v_0.Op != OpPPC64SRAWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVBreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
if v_0.Op != OpPPC64SRAWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(sizeof(x.Type) == 8) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c > 56) {
break
}
v.reset(OpPPC64SRDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c == 56) {
break
}
v.reset(OpPPC64SRADconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRADconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c >= 56) {
break
}
v.reset(OpPPC64SRADconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c > 24) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c == 24) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRAWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c >= 24) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = int64(int8(c))
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
return true
}
return false
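// MOVBreg is the sign-extending counterpart: the shift cases require enough
// known sign bits (SRAWconst, or a large-enough SRWconst), and the constant
// fold sign-extends the low byte, e.g. MOVBreg of MOVDconst [255] becomes
// MOVDconst [-1].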
config := b.Func.Config
typ := &b.Func.Config.Types
// match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVBstore [off1+off2] {sym} x val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVBstore [off1+int32(off2)] {sym} x val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg3(x, val, mem)
return true
}
// match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
- // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
val := v_1
mem := v_2
if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem)
// result: (MOVBstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpPPC64MOVBstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// cond: sym == nil && p.Uses == 1
// result: (MOVBstoreidx ptr idx val mem)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
- sym := v.Aux
+ sym := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64ADD {
break
// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVBreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVBZreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVHreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVHZreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVHZreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVWreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVWZreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVWZreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
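// A byte store writes only the low 8 bits, so any sign- or zero-extension of
// the stored value is dead and is stripped above. The SRWconst cases that
// follow extend this: the shift is kept but the extension under it is
// dropped, provided the shift amount keeps the byte of interest inside the
// extended width (c <= 8 for halfword inputs, c <= 24 for word inputs).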
// cond: c <= 8
// result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64SRWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpPPC64MOVHreg {
break
}
x := v_1_0.Args[0]
mem := v_2
if !(c <= 8) {
break
}
v.reset(OpPPC64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
// cond: c <= 8
// result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64SRWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpPPC64MOVHZreg {
break
}
x := v_1_0.Args[0]
mem := v_2
if !(c <= 8) {
break
}
v.reset(OpPPC64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
// cond: c <= 24
// result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64SRWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpPPC64MOVWreg {
break
}
x := v_1_0.Args[0]
mem := v_2
if !(c <= 24) {
break
}
v.reset(OpPPC64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
// cond: c <= 24
// result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64SRWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpPPC64MOVWZreg {
break
}
x := v_1_0.Args[0]
mem := v_2
if !(c <= 24) {
break
}
v.reset(OpPPC64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
// cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
// result: (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
for {
- i1 := v.AuxInt
- s := v.Aux
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpPPC64SRWconst || v_1.AuxInt != 24 {
+ if v_1.Op != OpPPC64SRWconst || auxIntToInt64(v_1.AuxInt) != 24 {
break
}
w := v_1.Args[0]
if x0.Op != OpPPC64MOVBstore {
break
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpPPC64SRWconst || x0_1.AuxInt != 16 || w != x0_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ if x0_1.Op != OpPPC64SRWconst || auxIntToInt64(x0_1.AuxInt) != 16 || w != x0_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
break
}
v.reset(OpPPC64MOVHstore)
- v.AuxInt = i0
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpPPC64SRWconst, typ.UInt16)
- v0.AuxInt = 16
+ v0.AuxInt = int64ToAuxInt(16)
v0.AddArg(w)
v.AddArg3(p, v0, mem)
return true
// cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
// result: (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
for {
- i1 := v.AuxInt
- s := v.Aux
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpPPC64SRDconst || v_1.AuxInt != 24 {
+ if v_1.Op != OpPPC64SRDconst || auxIntToInt64(v_1.AuxInt) != 24 {
break
}
w := v_1.Args[0]
if x0.Op != OpPPC64MOVBstore {
break
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpPPC64SRDconst || x0_1.AuxInt != 16 || w != x0_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ if x0_1.Op != OpPPC64SRDconst || auxIntToInt64(x0_1.AuxInt) != 16 || w != x0_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
break
}
v.reset(OpPPC64MOVHstore)
- v.AuxInt = i0
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpPPC64SRWconst, typ.UInt16)
- v0.AuxInt = 16
+ v0.AuxInt = int64ToAuxInt(16)
v0.AddArg(w)
v.AddArg3(p, v0, mem)
return true
// cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
// result: (MOVHstore [i0] {s} p w mem)
for {
- i1 := v.AuxInt
- s := v.Aux
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpPPC64SRWconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpPPC64SRWconst || auxIntToInt64(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
if x0.Op != OpPPC64MOVBstore {
break
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
break
}
v.reset(OpPPC64MOVHstore)
- v.AuxInt = i0
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
// result: (MOVHstore [i0] {s} p w mem)
for {
- i1 := v.AuxInt
- s := v.Aux
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpPPC64SRDconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpPPC64SRDconst || auxIntToInt64(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
if x0.Op != OpPPC64MOVBstore {
break
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
break
}
v.reset(OpPPC64MOVHstore)
- v.AuxInt = i0
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
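// The merge rules above and below combine adjacent byte stores of the same
// value w into wider stores. On little-endian targets, consecutive addresses
// holding successively higher shifted bytes of w (as the SRWconst/SRDconst
// amounts encode) form a plain MOVHstore/MOVWstore/MOVDstore; bytes laid out
// in the opposite order become the byte-reversed MOVHBRstore/MOVWBRstore/
// MOVDBRstore forms instead.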
// cond: !config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && clobber(x0, x1, x2)
// result: (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
for {
- i3 := v.AuxInt
- s := v.Aux
+ i3 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
if x0.Op != OpPPC64MOVBstore {
break
}
- i2 := x0.AuxInt
- if x0.Aux != s {
+ i2 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpPPC64SRWconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+ if x0_1.Op != OpPPC64SRWconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
if x1.Op != OpPPC64MOVBstore {
break
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpPPC64SRWconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+ if x1_1.Op != OpPPC64SRWconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
break
}
x2 := x1.Args[2]
if x2.Op != OpPPC64MOVBstore {
break
}
- i0 := x2.AuxInt
- if x2.Aux != s {
+ i0 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
break
}
mem := x2.Args[2]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpPPC64SRWconst || x2_1.AuxInt != 24 || w != x2_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && clobber(x0, x1, x2)) {
+ if x2_1.Op != OpPPC64SRWconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && clobber(x0, x1, x2)) {
break
}
v.reset(OpPPC64MOVWBRstore)
v0 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg(p)
v.AddArg3(v0, w, mem)
return true
// cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
// result: (MOVHBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
for {
- i1 := v.AuxInt
- s := v.Aux
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
if x0.Op != OpPPC64MOVBstore {
break
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpPPC64SRWconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ if x0_1.Op != OpPPC64SRWconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
break
}
v.reset(OpPPC64MOVHBRstore)
v0 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg(p)
v.AddArg3(v0, w, mem)
return true
// cond: !config.BigEndian && i0%4 == 0 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3)
// result: (MOVDstore [i0] {s} p w mem)
for {
- i7 := v.AuxInt
- s := v.Aux
+ i7 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpPPC64SRDconst || v_1.AuxInt != 56 {
+ if v_1.Op != OpPPC64SRDconst || auxIntToInt64(v_1.AuxInt) != 56 {
break
}
w := v_1.Args[0]
if x0.Op != OpPPC64MOVBstore {
break
}
- i6 := x0.AuxInt
- if x0.Aux != s {
+ i6 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpPPC64SRDconst || x0_1.AuxInt != 48 || w != x0_1.Args[0] {
+ if x0_1.Op != OpPPC64SRDconst || auxIntToInt64(x0_1.AuxInt) != 48 || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
if x1.Op != OpPPC64MOVBstore {
break
}
- i5 := x1.AuxInt
- if x1.Aux != s {
+ i5 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpPPC64SRDconst || x1_1.AuxInt != 40 || w != x1_1.Args[0] {
+ if x1_1.Op != OpPPC64SRDconst || auxIntToInt64(x1_1.AuxInt) != 40 || w != x1_1.Args[0] {
break
}
x2 := x1.Args[2]
if x2.Op != OpPPC64MOVBstore {
break
}
- i4 := x2.AuxInt
- if x2.Aux != s {
+ i4 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpPPC64SRDconst || x2_1.AuxInt != 32 || w != x2_1.Args[0] {
+ if x2_1.Op != OpPPC64SRDconst || auxIntToInt64(x2_1.AuxInt) != 32 || w != x2_1.Args[0] {
break
}
x3 := x2.Args[2]
if x3.Op != OpPPC64MOVWstore {
break
}
- i0 := x3.AuxInt
- if x3.Aux != s {
+ i0 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
break
}
mem := x3.Args[2]
break
}
v.reset(OpPPC64MOVDstore)
- v.AuxInt = i0
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
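// The extra i0%4 == 0 condition on the MOVDstore merge reflects that the
// doubleword store is a DS-form instruction: its 16-bit displacement must be
// a multiple of 4. The same constraint shows up below as the
// (off1+off2)%4 == 0 and c%4 == 0 conditions on the MOVDload/MOVDstore
// offset-folding rules.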
// cond: !config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3, x4, x5, x6)
// result: (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
for {
- i7 := v.AuxInt
- s := v.Aux
+ i7 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
if x0.Op != OpPPC64MOVBstore {
break
}
- i6 := x0.AuxInt
- if x0.Aux != s {
+ i6 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpPPC64SRDconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+ if x0_1.Op != OpPPC64SRDconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
if x1.Op != OpPPC64MOVBstore {
break
}
- i5 := x1.AuxInt
- if x1.Aux != s {
+ i5 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpPPC64SRDconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+ if x1_1.Op != OpPPC64SRDconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
break
}
x2 := x1.Args[2]
if x2.Op != OpPPC64MOVBstore {
break
}
- i4 := x2.AuxInt
- if x2.Aux != s {
+ i4 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpPPC64SRDconst || x2_1.AuxInt != 24 || w != x2_1.Args[0] {
+ if x2_1.Op != OpPPC64SRDconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
break
}
x3 := x2.Args[2]
if x3.Op != OpPPC64MOVBstore {
break
}
- i3 := x3.AuxInt
- if x3.Aux != s {
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[2]
break
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpPPC64SRDconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] {
+ if x3_1.Op != OpPPC64SRDconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
break
}
x4 := x3.Args[2]
if x4.Op != OpPPC64MOVBstore {
break
}
- i2 := x4.AuxInt
- if x4.Aux != s {
+ i2 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[2]
break
}
x4_1 := x4.Args[1]
- if x4_1.Op != OpPPC64SRDconst || x4_1.AuxInt != 40 || w != x4_1.Args[0] {
+ if x4_1.Op != OpPPC64SRDconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
break
}
x5 := x4.Args[2]
if x5.Op != OpPPC64MOVBstore {
break
}
- i1 := x5.AuxInt
- if x5.Aux != s {
+ i1 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
break
}
_ = x5.Args[2]
break
}
x5_1 := x5.Args[1]
- if x5_1.Op != OpPPC64SRDconst || x5_1.AuxInt != 48 || w != x5_1.Args[0] {
+ if x5_1.Op != OpPPC64SRDconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
break
}
x6 := x5.Args[2]
if x6.Op != OpPPC64MOVBstore {
break
}
- i0 := x6.AuxInt
- if x6.Aux != s {
+ i0 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
break
}
mem := x6.Args[2]
break
}
x6_1 := x6.Args[1]
- if x6_1.Op != OpPPC64SRDconst || x6_1.AuxInt != 56 || w != x6_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ if x6_1.Op != OpPPC64SRDconst || auxIntToInt64(x6_1.AuxInt) != 56 || w != x6_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
break
}
v.reset(OpPPC64MOVDBRstore)
v0 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg(p)
v.AddArg3(v0, w, mem)
return true
typ := &b.Func.Config.Types
// match: (MOVBstoreidx ptr (MOVDconst [c]) val mem)
// cond: is16Bit(c)
- // result: (MOVBstore [c] ptr val mem)
+ // result: (MOVBstore [int32(c)] ptr val mem)
for {
ptr := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
val := v_2
mem := v_3
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64MOVBstore)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstoreidx (MOVDconst [c]) ptr val mem)
// cond: is16Bit(c)
- // result: (MOVBstore [c] ptr val mem)
+ // result: (MOVBstore [int32(c)] ptr val mem)
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
val := v_2
mem := v_3
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64MOVBstore)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg3(ptr, val, mem)
return true
}
if v_2.Op != OpPPC64SRWconst {
break
}
- c := v_2.AuxInt
+ c := auxIntToInt64(v_2.AuxInt)
v_2_0 := v_2.Args[0]
if v_2_0.Op != OpPPC64MOVHreg {
break
}
v.reset(OpPPC64MOVBstoreidx)
v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
v.AddArg4(ptr, idx, v0, mem)
return true
if v_2.Op != OpPPC64SRWconst {
break
}
- c := v_2.AuxInt
+ c := auxIntToInt64(v_2.AuxInt)
v_2_0 := v_2.Args[0]
if v_2_0.Op != OpPPC64MOVHZreg {
break
}
v.reset(OpPPC64MOVBstoreidx)
v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
v.AddArg4(ptr, idx, v0, mem)
return true
if v_2.Op != OpPPC64SRWconst {
break
}
- c := v_2.AuxInt
+ c := auxIntToInt64(v_2.AuxInt)
v_2_0 := v_2.Args[0]
if v_2_0.Op != OpPPC64MOVWreg {
break
}
v.reset(OpPPC64MOVBstoreidx)
v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
v.AddArg4(ptr, idx, v0, mem)
return true
if v_2.Op != OpPPC64SRWconst {
break
}
- c := v_2.AuxInt
+ c := auxIntToInt64(v_2.AuxInt)
v_2_0 := v_2.Args[0]
if v_2_0.Op != OpPPC64MOVWZreg {
break
}
v.reset(OpPPC64MOVBstoreidx)
v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
v.AddArg4(ptr, idx, v0, mem)
return true
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVBstorezero [off1+off2] {sym} x mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} x mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64MOVBstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(x, mem)
return true
}
// match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _))
// result: (MFVSRD x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpPPC64FMOVDstore || v_1.AuxInt != off || v_1.Aux != sym {
+ if v_1.Op != OpPPC64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
}
// match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
- // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2) && (off1+off2)%4 == 0
- // result: (MOVDload [off1+off2] {sym} x mem)
+ // cond: is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0
+ // result: (MOVDload [off1+int32(off2)] {sym} x mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) && (off1+off2)%4 == 0) {
+ if !(is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(x, mem)
return true
}
// cond: sym == nil && p.Uses == 1
// result: (MOVDloadidx ptr idx mem)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
- sym := v.Aux
+ sym := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64ADD {
break
v_0 := v.Args[0]
// match: (MOVDloadidx ptr (MOVDconst [c]) mem)
// cond: is16Bit(c) && c%4 == 0
- // result: (MOVDload [c] ptr mem)
+ // result: (MOVDload [int32(c)] ptr mem)
for {
ptr := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
mem := v_2
if !(is16Bit(c) && c%4 == 0) {
break
}
v.reset(OpPPC64MOVDload)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVDloadidx (MOVDconst [c]) ptr mem)
// cond: is16Bit(c) && c%4 == 0
- // result: (MOVDload [c] ptr mem)
+ // result: (MOVDload [int32(c)] ptr mem)
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
mem := v_2
if !(is16Bit(c) && c%4 == 0) {
break
}
v.reset(OpPPC64MOVDload)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVDstore [off] {sym} ptr (MFVSRD x) mem)
// result: (FMOVDstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MFVSRD {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64FMOVDstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2) && (off1+off2)%4 == 0
- // result: (MOVDstore [off1+off2] {sym} x val mem)
+ // cond: is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0
+ // result: (MOVDstore [off1+int32(off2)] {sym} x val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(off1+off2) && (off1+off2)%4 == 0) {
+ if !(is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg3(x, val, mem)
return true
}
// match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
- // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // result: (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
val := v_1
mem := v_2
if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
// result: (MOVDstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpPPC64MOVDstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// cond: sym == nil && p.Uses == 1
// result: (MOVDstoreidx ptr idx val mem)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
- sym := v.Aux
+ sym := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64ADD {
break
v_0 := v.Args[0]
// match: (MOVDstoreidx ptr (MOVDconst [c]) val mem)
// cond: is16Bit(c) && c%4 == 0
- // result: (MOVDstore [c] ptr val mem)
+ // result: (MOVDstore [int32(c)] ptr val mem)
for {
ptr := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
val := v_2
mem := v_3
if !(is16Bit(c) && c%4 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVDstoreidx (MOVDconst [c]) ptr val mem)
// cond: is16Bit(c) && c%4 == 0
- // result: (MOVDstore [c] ptr val mem)
+ // result: (MOVDstore [int32(c)] ptr val mem)
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
val := v_2
mem := v_3
if !(is16Bit(c) && c%4 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg3(ptr, val, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2) && (off1+off2)%4 == 0
- // result: (MOVDstorezero [off1+off2] {sym} x mem)
+ // cond: is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0
+ // result: (MOVDstorezero [off1+int32(off2)] {sym} x mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) && (off1+off2)%4 == 0) {
+ if !(is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(x, mem)
return true
}
// match: (MOVHBRstore {sym} ptr (MOVHreg x) mem)
// result: (MOVHBRstore {sym} ptr x mem)
for {
- sym := v.Aux
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVHreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVHBRstore)
- v.Aux = sym
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHBRstore {sym} ptr (MOVHZreg x) mem)
// result: (MOVHBRstore {sym} ptr x mem)
for {
- sym := v.Aux
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVHZreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVHBRstore)
- v.Aux = sym
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHBRstore {sym} ptr (MOVWreg x) mem)
// result: (MOVHBRstore {sym} ptr x mem)
for {
- sym := v.Aux
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVWreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVHBRstore)
- v.Aux = sym
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHBRstore {sym} ptr (MOVWZreg x) mem)
// result: (MOVHBRstore {sym} ptr x mem)
for {
- sym := v.Aux
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVWZreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVHBRstore)
- v.Aux = sym
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
v_0 := v.Args[0]
// match: (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
- // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVHZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVHZload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHZload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVHZload [off1+off2] {sym} x mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVHZload [off1+int32(off2)] {sym} x mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64MOVHZload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(x, mem)
return true
}
// cond: sym == nil && p.Uses == 1
// result: (MOVHZloadidx ptr idx mem)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
- sym := v.Aux
+ sym := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64ADD {
break
v_0 := v.Args[0]
// match: (MOVHZloadidx ptr (MOVDconst [c]) mem)
// cond: is16Bit(c)
- // result: (MOVHZload [c] ptr mem)
+ // result: (MOVHZload [int32(c)] ptr mem)
for {
ptr := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
mem := v_2
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64MOVHZload)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHZloadidx (MOVDconst [c]) ptr mem)
// cond: is16Bit(c)
- // result: (MOVHZload [c] ptr mem)
+ // result: (MOVHZload [int32(c)] ptr mem)
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
mem := v_2
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64MOVHZload)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(ptr, mem)
return true
}
if y.Op != OpPPC64ANDconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0xFFFF) {
break
}
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVBZreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVHZreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(sizeof(x.Type) <= 16) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c >= 48) {
break
}
v.reset(OpPPC64SRDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c >= 16) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = int64(uint16(c))
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
return true
}
return false
v_0 := v.Args[0]
// match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
- // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVHload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVHload [off1+off2] {sym} x mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVHload [off1+int32(off2)] {sym} x mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64MOVHload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(x, mem)
return true
}
// cond: sym == nil && p.Uses == 1
// result: (MOVHloadidx ptr idx mem)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
- sym := v.Aux
+ sym := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64ADD {
break
v_0 := v.Args[0]
// match: (MOVHloadidx ptr (MOVDconst [c]) mem)
// cond: is16Bit(c)
- // result: (MOVHload [c] ptr mem)
+ // result: (MOVHload [int32(c)] ptr mem)
for {
ptr := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
mem := v_2
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64MOVHload)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHloadidx (MOVDconst [c]) ptr mem)
// cond: is16Bit(c)
- // result: (MOVHload [c] ptr mem)
+ // result: (MOVHload [int32(c)] ptr mem)
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
mem := v_2
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64MOVHload)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(ptr, mem)
return true
}
if y.Op != OpPPC64ANDconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0x7FFF) {
break
}
if v_0.Op != OpPPC64SRAWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVBreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
if v_0.Op != OpPPC64SRAWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVHreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
if v_0.Op != OpPPC64SRAWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(sizeof(x.Type) <= 16) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c > 48) {
break
}
v.reset(OpPPC64SRDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c == 48) {
break
}
v.reset(OpPPC64SRADconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRADconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c >= 48) {
break
}
v.reset(OpPPC64SRADconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c > 16) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRAWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c >= 16) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c == 16) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = int64(int16(c))
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
return true
}
return false
b := v.Block
config := b.Func.Config
// match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVHstore [off1+off2] {sym} x val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVHstore [off1+int32(off2)] {sym} x val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64MOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg3(x, val, mem)
return true
}
// match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
- // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // result: (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
val := v_1
mem := v_2
if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
// result: (MOVHstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpPPC64MOVHstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// cond: sym == nil && p.Uses == 1
// result: (MOVHstoreidx ptr idx val mem)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
- sym := v.Aux
+ sym := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64ADD {
break
// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVHreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVHstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVHZreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVHstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVWreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVHstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVWZreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVWZreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVHstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)
// result: (MOVWstore [i0] {s} p w mem)
for {
- i1 := v.AuxInt
- s := v.Aux
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpPPC64SRWconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpPPC64SRWconst || auxIntToInt64(v_1.AuxInt) != 16 {
break
}
w := v_1.Args[0]
if x0.Op != OpPPC64MOVHstore {
break
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
break
}
v.reset(OpPPC64MOVWstore)
- v.AuxInt = i0
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)
// result: (MOVWstore [i0] {s} p w mem)
for {
- i1 := v.AuxInt
- s := v.Aux
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpPPC64SRDconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpPPC64SRDconst || auxIntToInt64(v_1.AuxInt) != 16 {
break
}
w := v_1.Args[0]
if x0.Op != OpPPC64MOVHstore {
break
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
break
}
v.reset(OpPPC64MOVWstore)
- v.AuxInt = i0
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
v_0 := v.Args[0]
// match: (MOVHstoreidx ptr (MOVDconst [c]) val mem)
// cond: is16Bit(c)
- // result: (MOVHstore [c] ptr val mem)
+ // result: (MOVHstore [int32(c)] ptr val mem)
for {
ptr := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
val := v_2
mem := v_3
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64MOVHstore)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg3(ptr, val, mem)
return true
}
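// This constant-index fold and its mirrored form below follow the standard
// typed-aux width pattern: MOVDconst carries an int64 AuxInt, while
// MOVHstore's offset is typed int32, so the rule reads c as int64, guards
// the range with is16Bit, and only then narrows with int32(c). A minimal
// sketch of the helpers involved, assuming the usual rewrite.go definitions:
//
//	func auxIntToInt64(i int64) int64 { return i }
//	func int32ToAuxInt(i int32) int64 { return int64(i) }
//	func is16Bit(n int64) bool        { return n == int64(int16(n)) }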
// match: (MOVHstoreidx (MOVDconst [c]) ptr val mem)
// cond: is16Bit(c)
- // result: (MOVHstore [c] ptr val mem)
+ // result: (MOVHstore [int32(c)] ptr val mem)
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
val := v_2
mem := v_3
break
}
v.reset(OpPPC64MOVHstore)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg3(ptr, val, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVHstorezero [off1+off2] {sym} x mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} x mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64MOVHstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(x, mem)
return true
}
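// The widening in is16Bit(int64(off1)+off2) is deliberate: off1 is the
// store's int32 offset while off2 is ADDconst's int64 AuxInt, so the sum is
// formed in 64 bits before the displacement check. For example, off1 = 0x7fff
// and off2 = 1 sum to 0x8000, which fails is16Bit and keeps the fold from
// emitting an unencodable 16-bit D-form displacement.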
// match: (MOVWBRstore {sym} ptr (MOVWreg x) mem)
// result: (MOVWBRstore {sym} ptr x mem)
for {
- sym := v.Aux
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVWreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVWBRstore)
- v.Aux = sym
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVWBRstore {sym} ptr (MOVWZreg x) mem)
// result: (MOVWBRstore {sym} ptr x mem)
for {
- sym := v.Aux
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVWZreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVWBRstore)
- v.Aux = sym
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
v_0 := v.Args[0]
// match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
- // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVWZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVWZload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
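// canMergeSym and mergeSymTyped implement the usual "at most one symbol"
// rule for folding a MOVDaddr into a load or store: the fold is only legal
// when at least one of sym1/sym2 is nil, and the merged symbol is simply the
// non-nil one. A plausible sketch, assuming the rewrite.go shape:
//
//	func canMergeSym(x, y Sym) bool { return x == nil || y == nil }
//	func mergeSymTyped(x, y Sym) Sym {
//		if x == nil {
//			return y
//		}
//		return x
//	}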
// match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVWZload [off1+off2] {sym} x mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVWZload [off1+int32(off2)] {sym} x mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64MOVWZload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(x, mem)
return true
}
// match: (MOVWZload [0] {sym} p:(ADD ptr idx) mem)
// cond: sym == nil && p.Uses == 1
// result: (MOVWZloadidx ptr idx mem)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
- sym := v.Aux
+ sym := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64ADD {
break
v_0 := v.Args[0]
// match: (MOVWZloadidx ptr (MOVDconst [c]) mem)
// cond: is16Bit(c)
- // result: (MOVWZload [c] ptr mem)
+ // result: (MOVWZload [int32(c)] ptr mem)
for {
ptr := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
mem := v_2
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64MOVWZload)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWZloadidx (MOVDconst [c]) ptr mem)
// cond: is16Bit(c)
- // result: (MOVWZload [c] ptr mem)
+ // result: (MOVWZload [int32(c)] ptr mem)
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
mem := v_2
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64MOVWZload)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(ptr, mem)
return true
}
if y.Op != OpPPC64ANDconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0xFFFFFFFF) {
break
}
if y_0.Op != OpPPC64MOVDconst {
continue
}
- c := y_0.AuxInt
+ c := auxIntToInt64(y_0.AuxInt)
if !(uint64(c) <= 0xFFFFFFFF) {
continue
}
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVBZreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVHZreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVWZreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
if v_0.Op != OpPPC64SRWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(sizeof(x.Type) <= 32) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c >= 32) {
break
}
v.reset(OpPPC64SRDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = int64(uint32(c))
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
return true
}
return false
v_0 := v.Args[0]
// match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
- // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2) && (off1+off2)%4 == 0
- // result: (MOVWload [off1+off2] {sym} x mem)
+ // cond: is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0
+ // result: (MOVWload [off1+int32(off2)] {sym} x mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) && (off1+off2)%4 == 0) {
+ if !(is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(x, mem)
return true
}
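// The extra (int64(off1)+off2)%4 == 0 condition is specific to the
// sign-extending MOVWload: on ppc64 it is implemented by lwa, a DS-form
// instruction whose 16-bit displacement must be a multiple of 4. The
// zero-extending MOVWZload rules above carry no such restriction because
// lwz is D-form and accepts any 16-bit displacement.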
// match: (MOVWload [0] {sym} p:(ADD ptr idx) mem)
// cond: sym == nil && p.Uses == 1
// result: (MOVWloadidx ptr idx mem)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
- sym := v.Aux
+ sym := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64ADD {
break
v_0 := v.Args[0]
// match: (MOVWloadidx ptr (MOVDconst [c]) mem)
// cond: is16Bit(c) && c%4 == 0
- // result: (MOVWload [c] ptr mem)
+ // result: (MOVWload [int32(c)] ptr mem)
for {
ptr := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
mem := v_2
if !(is16Bit(c) && c%4 == 0) {
break
}
v.reset(OpPPC64MOVWload)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWloadidx (MOVDconst [c]) ptr mem)
// cond: is16Bit(c) && c%4 == 0
- // result: (MOVWload [c] ptr mem)
+ // result: (MOVWload [int32(c)] ptr mem)
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
mem := v_2
if !(is16Bit(c) && c%4 == 0) {
break
}
v.reset(OpPPC64MOVWload)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(ptr, mem)
return true
}
if y.Op != OpPPC64ANDconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if !(uint64(c) <= 0xFFFF) {
break
}
if y_0.Op != OpPPC64MOVDconst {
continue
}
- c := y_0.AuxInt
+ c := auxIntToInt64(y_0.AuxInt)
if !(uint64(c) <= 0x7FFFFFFF) {
continue
}
if v_0.Op != OpPPC64SRAWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVBreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
if v_0.Op != OpPPC64SRAWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVHreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
if v_0.Op != OpPPC64SRAWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVWreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpPPC64MOVWreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
if v_0.Op != OpPPC64SRAWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(sizeof(x.Type) <= 32) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c > 32) {
break
}
v.reset(OpPPC64SRDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRADconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c >= 32) {
break
}
v.reset(OpPPC64SRADconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SRDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c == 32) {
break
}
v.reset(OpPPC64SRADconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = int64(int32(c))
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
return true
}
return false
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVWstore [off1+off2] {sym} x val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVWstore [off1+int32(off2)] {sym} x val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg3(x, val, mem)
return true
}
// match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
- // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
val := v_1
mem := v_2
break
}
v.reset(OpPPC64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
// result: (MOVWstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpPPC64MOVWstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstore [0] {sym} p:(ADD ptr idx) val mem)
// cond: sym == nil && p.Uses == 1
// result: (MOVWstoreidx ptr idx val mem)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
- sym := v.Aux
+ sym := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64ADD {
break
// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
// result: (MOVWstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVWreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVWstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem)
// result: (MOVWstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpPPC64MOVWZreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpPPC64MOVWstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
v_0 := v.Args[0]
// match: (MOVWstoreidx ptr (MOVDconst [c]) val mem)
// cond: is16Bit(c)
- // result: (MOVWstore [c] ptr val mem)
+ // result: (MOVWstore [int32(c)] ptr val mem)
for {
ptr := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
val := v_2
mem := v_3
if !(is16Bit(c)) {
break
}
v.reset(OpPPC64MOVWstore)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstoreidx (MOVDconst [c]) ptr val mem)
// cond: is16Bit(c)
- // result: (MOVWstore [c] ptr val mem)
+ // result: (MOVWstore [int32(c)] ptr val mem)
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
val := v_2
mem := v_3
break
}
v.reset(OpPPC64MOVWstore)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg3(ptr, val, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVWstorezero [off1+off2] {sym} x mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} x mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpPPC64ADDconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(off1 + off2)) {
+ if !(is16Bit(int64(off1) + off2)) {
break
}
v.reset(OpPPC64MOVWstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(x, mem)
return true
}
b := v.Block
typ := &b.Func.Config.Types
// match: (MTVSRD (MOVDconst [c]))
- // result: (FMOVDconst [c])
+ // result: (FMOVDconst [math.Float64frombits(uint64(c))])
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpPPC64FMOVDconst)
- v.AuxInt = c
+ v.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(c)))
return true
}
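// FMOVDconst's AuxInt is typed float64, so moving a MOVDconst bit pattern
// into a floating-point register reinterprets the same 64 bits rather than
// converting the value: math.Float64frombits(uint64(c)) is a pure bit cast
// (for instance, c = 0x3ff0000000000000 becomes the float64 1.0). This is
// also what pulls the math import into this generated file.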
// match: (MTVSRD x:(MOVDload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: (FMOVDload [off] {sym} ptr mem)
if x.Op != OpPPC64MOVDload {
break
}
- off := x.AuxInt
- sym := x.Aux
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
mem := x.Args[1]
ptr := x.Args[0]
if !(x.Uses == 1 && clobber(x)) {
b = x.Block
v0 := b.NewValue0(x.Pos, OpPPC64FMOVDload, typ.Float64)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
func rewriteValuePPC64_OpPPC64MaskIfNotCarry(v *Value) bool {
v_0 := v.Args[0]
// match: (MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _)))
- // cond: c < 0 && d > 0 && c + d < 0
+ // cond: c < 0 && d > 0 && int64(c) + d < 0
// result: (MOVDconst [-1])
for {
if v_0.Op != OpPPC64ADDconstForCarry {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt16(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- d := v_0_0.AuxInt
- if !(c < 0 && d > 0 && c+d < 0) {
+ d := auxIntToInt64(v_0_0.AuxInt)
+ if !(c < 0 && d > 0 && int64(c)+d < 0) {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
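// ADDconstForCarry declares an int16 AuxInt while ANDconst uses int64,
// which is why the typed condition widens explicitly with int64(c) before
// adding. The untyped form could compare the raw int64 AuxInt fields
// directly; once the accessors are typed, mixing the two widths without a
// conversion would not compile.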
// match: (MaskIfNotCarry (FlagCarrySet))
// result: (MOVDconst [0])
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (MaskIfNotCarry (FlagCarryClear))
// result: (MOVDconst [-1])
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
if v_0.Op != OpPPC64MOVDconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpPPC64MOVDconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = ^(c | d)
+ v.AuxInt = int64ToAuxInt(^(c | d))
return true
}
break
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (NotEqual (FlagLT))
// result: (MOVDconst [1])
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (NotEqual (FlagGT))
// result: (MOVDconst [1])
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (NotEqual (InvertFlags x))
for {
cmp := v_0
v.reset(OpPPC64ISELB)
- v.AuxInt = 6
+ v.AuxInt = int32ToAuxInt(6)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, cmp)
return true
}
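// ISELB materializes a boolean from a comparison: it selects its first
// argument (here the constant 1) when the condition encoded in its AuxInt
// holds and yields 0 otherwise. The value 6 appears to select the NE
// condition in this backend's ISEL encoding (0=LT 1=GT 2=EQ 4=GE 5=LE 6=NE),
// matching the NotEqual op being lowered here.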
if v_0.Op != OpPPC64SLDconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpPPC64SRDconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 64-c) {
continue
}
v.reset(OpPPC64ROTLconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
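// This is the standard rotate recognition: for 0 < c < 64,
// (x << c) | (x >> (64-c)) is exactly a left rotate by c, i.e.
// bits.RotateLeft64(x, int(c)) on a uint64. Both shift counts live in
// int64 AuxInts, so unlike the memory-offset rules no narrowing is needed
// when carrying c over to ROTLconst.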
if v_0.Op != OpPPC64SLWconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpPPC64SRWconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 32-c) {
continue
}
v.reset(OpPPC64ROTLWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || v_0_1.AuxInt != 63 {
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
}
_ = v_1_1.Args[1]
v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 64 {
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 {
continue
}
v_1_1_1 := v_1_1.Args[1]
- if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 63 || y != v_1_1_1.Args[0] {
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 63 || y != v_1_1_1.Args[0] {
continue
}
v.reset(OpPPC64ROTL)
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || v_0_1.AuxInt != 31 {
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
}
_ = v_1_1.Args[1]
v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 32 {
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 {
continue
}
v_1_1_1 := v_1_1.Args[1]
- if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 31 || y != v_1_1_1.Args[0] {
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 31 || y != v_1_1_1.Args[0] {
continue
}
v.reset(OpPPC64ROTLW)
if v_0.Op != OpPPC64MOVDconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpPPC64MOVDconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = c | d
+ v.AuxInt = int64ToAuxInt(c | d)
return true
}
break
if v_1.Op != OpPPC64MOVDconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(isU32Bit(c)) {
continue
}
v.reset(OpPPC64ORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if x0.Op != OpPPC64MOVBZload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
o1 := v_1
- if o1.Op != OpPPC64SLWconst || o1.AuxInt != 8 {
+ if o1.Op != OpPPC64SLWconst || auxIntToInt64(o1.AuxInt) != 8 {
continue
}
x1 := o1.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t)
v.copyOf(v0)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg2(p, mem)
return true
}
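// The rules in this group merge adjacent little-endian byte loads into one
// wider load: a MOVBZload at i0 OR'd with a MOVBZload at i1 shifted left by
// 8 becomes a single MOVHZload at i0, provided (per the elided condition)
// the offsets are adjacent (i1 == i0+1), each intermediate value has a
// single use, and the loads share a mergePoint so clobber can retire them
// safely.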
if x0.Op != OpPPC64MOVBZload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
o1 := v_1
- if o1.Op != OpPPC64SLDconst || o1.AuxInt != 8 {
+ if o1.Op != OpPPC64SLDconst || auxIntToInt64(o1.AuxInt) != 8 {
continue
}
x1 := o1.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t)
v.copyOf(v0)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg2(p, mem)
return true
}
if x0.Op != OpPPC64MOVBZload {
continue
}
- i1 := x0.AuxInt
- s := x0.Aux
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
o1 := v_1
- if o1.Op != OpPPC64SLWconst || o1.AuxInt != 8 {
+ if o1.Op != OpPPC64SLWconst || auxIntToInt64(o1.AuxInt) != 8 {
continue
}
x1 := o1.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i0 := x1.AuxInt
- if x1.Aux != s {
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t)
v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
if x0.Op != OpPPC64MOVBZload {
continue
}
- i1 := x0.AuxInt
- s := x0.Aux
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
o1 := v_1
- if o1.Op != OpPPC64SLDconst || o1.AuxInt != 8 {
+ if o1.Op != OpPPC64SLDconst || auxIntToInt64(o1.AuxInt) != 8 {
continue
}
x1 := o1.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i0 := x1.AuxInt
- if x1.Aux != s {
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t)
v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
if s0.Op != OpPPC64SLWconst {
continue
}
- n1 := s0.AuxInt
+ n1 := auxIntToInt64(s0.AuxInt)
x0 := s0.Args[0]
if x0.Op != OpPPC64MOVBZload {
continue
}
- i1 := x0.AuxInt
- s := x0.Aux
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
s1 := v_1
if s1.Op != OpPPC64SLWconst {
continue
}
- n2 := s1.AuxInt
+ n2 := auxIntToInt64(s1.AuxInt)
x1 := s1.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i0 := x1.AuxInt
- if x1.Aux != s {
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t)
v.copyOf(v0)
- v0.AuxInt = n1
+ v0.AuxInt = int64ToAuxInt(n1)
v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t)
v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v2.AuxInt = i0
- v2.Aux = s
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
v2.AddArg(p)
v1.AddArg2(v2, mem)
v0.AddArg(v1)
if s0.Op != OpPPC64SLDconst {
continue
}
- n1 := s0.AuxInt
+ n1 := auxIntToInt64(s0.AuxInt)
x0 := s0.Args[0]
if x0.Op != OpPPC64MOVBZload {
continue
}
- i1 := x0.AuxInt
- s := x0.Aux
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
s1 := v_1
if s1.Op != OpPPC64SLDconst {
continue
}
- n2 := s1.AuxInt
+ n2 := auxIntToInt64(s1.AuxInt)
x1 := s1.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i0 := x1.AuxInt
- if x1.Aux != s {
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t)
v.copyOf(v0)
- v0.AuxInt = n1
+ v0.AuxInt = int64ToAuxInt(n1)
v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t)
v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v2.AuxInt = i0
- v2.Aux = s
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
v2.AddArg(p)
v1.AddArg2(v2, mem)
v0.AddArg(v1)
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
s1 := v_0
- if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 {
+ if s1.Op != OpPPC64SLWconst || auxIntToInt64(s1.AuxInt) != 24 {
continue
}
x2 := s1.Args[0]
if x2.Op != OpPPC64MOVBZload {
continue
}
- i3 := x2.AuxInt
- s := x2.Aux
+ i3 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
mem := x2.Args[1]
p := x2.Args[0]
o0 := v_1
o0_1 := o0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
s0 := o0_0
- if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 {
+ if s0.Op != OpPPC64SLWconst || auxIntToInt64(s0.AuxInt) != 16 {
continue
}
x1 := s0.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i2 := x1.AuxInt
- if x1.Aux != s {
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
if x0.Op != OpPPC64MOVHZload {
continue
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t)
v.copyOf(v0)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg2(p, mem)
return true
}
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
s1 := v_0
- if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 {
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 24 {
continue
}
x2 := s1.Args[0]
if x2.Op != OpPPC64MOVBZload {
continue
}
- i3 := x2.AuxInt
- s := x2.Aux
+ i3 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
mem := x2.Args[1]
p := x2.Args[0]
o0 := v_1
o0_1 := o0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
s0 := o0_0
- if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 {
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 16 {
continue
}
x1 := s0.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i2 := x1.AuxInt
- if x1.Aux != s {
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
if x0.Op != OpPPC64MOVHZload {
continue
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t)
v.copyOf(v0)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg2(p, mem)
return true
}
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
s1 := v_0
- if s1.Op != OpPPC64SLWconst || s1.AuxInt != 24 {
+ if s1.Op != OpPPC64SLWconst || auxIntToInt64(s1.AuxInt) != 24 {
continue
}
x2 := s1.Args[0]
if x2.Op != OpPPC64MOVBZload {
continue
}
- i0 := x2.AuxInt
- s := x2.Aux
+ i0 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
mem := x2.Args[1]
p := x2.Args[0]
o0 := v_1
o0_1 := o0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
s0 := o0_0
- if s0.Op != OpPPC64SLWconst || s0.AuxInt != 16 {
+ if s0.Op != OpPPC64SLWconst || auxIntToInt64(s0.AuxInt) != 16 {
continue
}
x1 := s0.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr {
continue
}
- i2 := x0_0.AuxInt
- if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ i2 := auxIntToInt32(x0_0.AuxInt)
+ if auxToSym(x0_0.Aux) != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
continue
}
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
s1 := v_0
- if s1.Op != OpPPC64SLDconst || s1.AuxInt != 24 {
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 24 {
continue
}
x2 := s1.Args[0]
if x2.Op != OpPPC64MOVBZload {
continue
}
- i0 := x2.AuxInt
- s := x2.Aux
+ i0 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
mem := x2.Args[1]
p := x2.Args[0]
o0 := v_1
o0_1 := o0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
s0 := o0_0
- if s0.Op != OpPPC64SLDconst || s0.AuxInt != 16 {
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 16 {
continue
}
x1 := s0.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr {
continue
}
- i2 := x0_0.AuxInt
- if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ i2 := auxIntToInt32(x0_0.AuxInt)
+ if auxToSym(x0_0.Aux) != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
continue
}
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
if x0.Op != OpPPC64MOVBZload {
continue
}
- i3 := x0.AuxInt
- s := x0.Aux
+ i3 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
o0 := v_1
o0_1 := o0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
s0 := o0_0
- if s0.Op != OpPPC64SLWconst || s0.AuxInt != 8 {
+ if s0.Op != OpPPC64SLWconst || auxIntToInt64(s0.AuxInt) != 8 {
continue
}
x1 := s0.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i2 := x1.AuxInt
- if x1.Aux != s {
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
continue
}
s1 := o0_1
- if s1.Op != OpPPC64SLWconst || s1.AuxInt != 16 {
+ if s1.Op != OpPPC64SLWconst || auxIntToInt64(s1.AuxInt) != 16 {
continue
}
x2 := s1.Args[0]
if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr {
continue
}
- i0 := x2_0.AuxInt
- if x2_0.Aux != s || p != x2_0.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ i0 := auxIntToInt32(x2_0.AuxInt)
+ if auxToSym(x2_0.Aux) != s || p != x2_0.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
continue
}
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t)
v.copyOf(v0)
v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
if x0.Op != OpPPC64MOVBZload {
continue
}
- i3 := x0.AuxInt
- s := x0.Aux
+ i3 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
o0 := v_1
o0_1 := o0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
s0 := o0_0
- if s0.Op != OpPPC64SLDconst || s0.AuxInt != 8 {
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 8 {
continue
}
x1 := s0.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i2 := x1.AuxInt
- if x1.Aux != s {
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
continue
}
s1 := o0_1
- if s1.Op != OpPPC64SLDconst || s1.AuxInt != 16 {
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 16 {
continue
}
x2 := s1.Args[0]
if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr {
continue
}
- i0 := x2_0.AuxInt
- if x2_0.Aux != s || p != x2_0.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ i0 := auxIntToInt32(x2_0.AuxInt)
+ if auxToSym(x2_0.Aux) != s || p != x2_0.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
continue
}
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t)
v.copyOf(v0)
v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
s2 := v_0
- if s2.Op != OpPPC64SLDconst || s2.AuxInt != 32 {
+ if s2.Op != OpPPC64SLDconst || auxIntToInt64(s2.AuxInt) != 32 {
continue
}
x2 := s2.Args[0]
if x2.Op != OpPPC64MOVBZload {
continue
}
- i3 := x2.AuxInt
- s := x2.Aux
+ i3 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
mem := x2.Args[1]
p := x2.Args[0]
o0 := v_1
o0_1 := o0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
s1 := o0_0
- if s1.Op != OpPPC64SLDconst || s1.AuxInt != 40 {
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 40 {
continue
}
x1 := s1.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i2 := x1.AuxInt
- if x1.Aux != s {
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
continue
}
s0 := o0_1
- if s0.Op != OpPPC64SLDconst || s0.AuxInt != 48 {
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 48 {
continue
}
x0 := s0.Args[0]
if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr {
continue
}
- i0 := x0_0.AuxInt
- if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, s2, o0)) {
+ i0 := auxIntToInt32(x0_0.AuxInt)
+ if auxToSym(x0_0.Aux) != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, s2, o0)) {
continue
}
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t)
v.copyOf(v0)
- v0.AuxInt = 32
+ v0.AuxInt = int64ToAuxInt(32)
v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t)
v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v2.AuxInt = i0
- v2.Aux = s
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
v2.AddArg(p)
v1.AddArg2(v2, mem)
v0.AddArg(v1)
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
s2 := v_0
- if s2.Op != OpPPC64SLDconst || s2.AuxInt != 56 {
+ if s2.Op != OpPPC64SLDconst || auxIntToInt64(s2.AuxInt) != 56 {
continue
}
x2 := s2.Args[0]
if x2.Op != OpPPC64MOVBZload {
continue
}
- i0 := x2.AuxInt
- s := x2.Aux
+ i0 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
mem := x2.Args[1]
p := x2.Args[0]
o0 := v_1
o0_1 := o0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
s1 := o0_0
- if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 {
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 48 {
continue
}
x1 := s1.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
continue
}
s0 := o0_1
- if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 {
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 32 {
continue
}
x0 := s0.Args[0]
if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr {
continue
}
- i2 := x0_0.AuxInt
- if x0_0.Aux != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, s2, o0)) {
+ i2 := auxIntToInt32(x0_0.AuxInt)
+ if auxToSym(x0_0.Aux) != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, s2, o0)) {
continue
}
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t)
v.copyOf(v0)
- v0.AuxInt = 32
+ v0.AuxInt = int64ToAuxInt(32)
v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t)
v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v2.AuxInt = i0
- v2.Aux = s
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
v2.AddArg(p)
v1.AddArg2(v2, mem)
v0.AddArg(v1)
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
s6 := v_0
- if s6.Op != OpPPC64SLDconst || s6.AuxInt != 56 {
+ if s6.Op != OpPPC64SLDconst || auxIntToInt64(s6.AuxInt) != 56 {
continue
}
x7 := s6.Args[0]
if x7.Op != OpPPC64MOVBZload {
continue
}
- i7 := x7.AuxInt
- s := x7.Aux
+ i7 := auxIntToInt32(x7.AuxInt)
+ s := auxToSym(x7.Aux)
mem := x7.Args[1]
p := x7.Args[0]
o5 := v_1
o5_1 := o5.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o5_0, o5_1 = _i1+1, o5_1, o5_0 {
s5 := o5_0
- if s5.Op != OpPPC64SLDconst || s5.AuxInt != 48 {
+ if s5.Op != OpPPC64SLDconst || auxIntToInt64(s5.AuxInt) != 48 {
continue
}
x6 := s5.Args[0]
if x6.Op != OpPPC64MOVBZload {
continue
}
- i6 := x6.AuxInt
- if x6.Aux != s {
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
continue
}
_ = x6.Args[1]
o4_1 := o4.Args[1]
for _i2 := 0; _i2 <= 1; _i2, o4_0, o4_1 = _i2+1, o4_1, o4_0 {
s4 := o4_0
- if s4.Op != OpPPC64SLDconst || s4.AuxInt != 40 {
+ if s4.Op != OpPPC64SLDconst || auxIntToInt64(s4.AuxInt) != 40 {
continue
}
x5 := s4.Args[0]
if x5.Op != OpPPC64MOVBZload {
continue
}
- i5 := x5.AuxInt
- if x5.Aux != s {
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
continue
}
_ = x5.Args[1]
o3_1 := o3.Args[1]
for _i3 := 0; _i3 <= 1; _i3, o3_0, o3_1 = _i3+1, o3_1, o3_0 {
s3 := o3_0
- if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 {
+ if s3.Op != OpPPC64SLDconst || auxIntToInt64(s3.AuxInt) != 32 {
continue
}
x4 := s3.Args[0]
if x4.Op != OpPPC64MOVBZload {
continue
}
- i4 := x4.AuxInt
- if x4.Aux != s {
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
if x0.Op != OpPPC64MOVWZload {
continue
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
b = mergePoint(b, x0, x4, x5, x6, x7)
v0 := b.NewValue0(x0.Pos, OpPPC64MOVDload, t)
v.copyOf(v0)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg2(p, mem)
return true
}
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
s0 := v_0
- if s0.Op != OpPPC64SLDconst || s0.AuxInt != 56 {
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
x0 := s0.Args[0]
if x0.Op != OpPPC64MOVBZload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
o0 := v_1
o0_1 := o0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
s1 := o0_0
- if s1.Op != OpPPC64SLDconst || s1.AuxInt != 48 {
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 48 {
continue
}
x1 := s1.Args[0]
if x1.Op != OpPPC64MOVBZload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
o1_1 := o1.Args[1]
for _i2 := 0; _i2 <= 1; _i2, o1_0, o1_1 = _i2+1, o1_1, o1_0 {
s2 := o1_0
- if s2.Op != OpPPC64SLDconst || s2.AuxInt != 40 {
+ if s2.Op != OpPPC64SLDconst || auxIntToInt64(s2.AuxInt) != 40 {
continue
}
x2 := s2.Args[0]
if x2.Op != OpPPC64MOVBZload {
continue
}
- i2 := x2.AuxInt
- if x2.Aux != s {
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
o2_1 := o2.Args[1]
for _i3 := 0; _i3 <= 1; _i3, o2_0, o2_1 = _i3+1, o2_1, o2_0 {
s3 := o2_0
- if s3.Op != OpPPC64SLDconst || s3.AuxInt != 32 {
+ if s3.Op != OpPPC64SLDconst || auxIntToInt64(s3.AuxInt) != 32 {
continue
}
x3 := s3.Args[0]
if x3.Op != OpPPC64MOVBZload {
continue
}
- i3 := x3.AuxInt
- if x3.Aux != s {
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr {
continue
}
- i4 := x4_0.AuxInt
+ i4 := auxIntToInt32(x4_0.AuxInt)
if p != x4_0.Args[0] || mem != x4.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, o0, o1, o2, s0, s1, s2, s3)) {
continue
}
v0 := b.NewValue0(x4.Pos, OpPPC64MOVDBRload, t)
v.copyOf(v0)
v1 := b.NewValue0(x4.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
if x7.Op != OpPPC64MOVBZload {
continue
}
- i7 := x7.AuxInt
- s := x7.Aux
+ i7 := auxIntToInt32(x7.AuxInt)
+ s := auxToSym(x7.Aux)
mem := x7.Args[1]
p := x7.Args[0]
o5 := v_1
o5_1 := o5.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o5_0, o5_1 = _i1+1, o5_1, o5_0 {
s6 := o5_0
- if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 {
+ if s6.Op != OpPPC64SLDconst || auxIntToInt64(s6.AuxInt) != 8 {
continue
}
x6 := s6.Args[0]
if x6.Op != OpPPC64MOVBZload {
continue
}
- i6 := x6.AuxInt
- if x6.Aux != s {
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
continue
}
_ = x6.Args[1]
o4_1 := o4.Args[1]
for _i2 := 0; _i2 <= 1; _i2, o4_0, o4_1 = _i2+1, o4_1, o4_0 {
s5 := o4_0
- if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 {
+ if s5.Op != OpPPC64SLDconst || auxIntToInt64(s5.AuxInt) != 16 {
continue
}
x5 := s5.Args[0]
if x5.Op != OpPPC64MOVBZload {
continue
}
- i5 := x5.AuxInt
- if x5.Aux != s {
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
continue
}
_ = x5.Args[1]
o3_1 := o3.Args[1]
for _i3 := 0; _i3 <= 1; _i3, o3_0, o3_1 = _i3+1, o3_1, o3_0 {
s4 := o3_0
- if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 {
+ if s4.Op != OpPPC64SLDconst || auxIntToInt64(s4.AuxInt) != 24 {
continue
}
x4 := s4.Args[0]
if x4.Op != OpPPC64MOVBZload {
continue
}
- i4 := x4.AuxInt
- if x4.Aux != s {
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
continue
}
s0 := o3_1
- if s0.Op != OpPPC64SLWconst || s0.AuxInt != 32 {
+ if s0.Op != OpPPC64SLWconst || auxIntToInt64(s0.AuxInt) != 32 {
continue
}
x3 := s0.Args[0]
if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr {
continue
}
- i0 := x3_0.AuxInt
- if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)) {
+ i0 := auxIntToInt32(x3_0.AuxInt)
+ if auxToSym(x3_0.Aux) != s || p != x3_0.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)) {
continue
}
b = mergePoint(b, x3, x4, x5, x6, x7)
v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t)
v.copyOf(v0)
v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
if x7.Op != OpPPC64MOVBZload {
continue
}
- i7 := x7.AuxInt
- s := x7.Aux
+ i7 := auxIntToInt32(x7.AuxInt)
+ s := auxToSym(x7.Aux)
mem := x7.Args[1]
p := x7.Args[0]
o5 := v_1
o5_1 := o5.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o5_0, o5_1 = _i1+1, o5_1, o5_0 {
s6 := o5_0
- if s6.Op != OpPPC64SLDconst || s6.AuxInt != 8 {
+ if s6.Op != OpPPC64SLDconst || auxIntToInt64(s6.AuxInt) != 8 {
continue
}
x6 := s6.Args[0]
if x6.Op != OpPPC64MOVBZload {
continue
}
- i6 := x6.AuxInt
- if x6.Aux != s {
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
continue
}
_ = x6.Args[1]
o4_1 := o4.Args[1]
for _i2 := 0; _i2 <= 1; _i2, o4_0, o4_1 = _i2+1, o4_1, o4_0 {
s5 := o4_0
- if s5.Op != OpPPC64SLDconst || s5.AuxInt != 16 {
+ if s5.Op != OpPPC64SLDconst || auxIntToInt64(s5.AuxInt) != 16 {
continue
}
x5 := s5.Args[0]
if x5.Op != OpPPC64MOVBZload {
continue
}
- i5 := x5.AuxInt
- if x5.Aux != s {
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
continue
}
_ = x5.Args[1]
o3_1 := o3.Args[1]
for _i3 := 0; _i3 <= 1; _i3, o3_0, o3_1 = _i3+1, o3_1, o3_0 {
s4 := o3_0
- if s4.Op != OpPPC64SLDconst || s4.AuxInt != 24 {
+ if s4.Op != OpPPC64SLDconst || auxIntToInt64(s4.AuxInt) != 24 {
continue
}
x4 := s4.Args[0]
if x4.Op != OpPPC64MOVBZload {
continue
}
- i4 := x4.AuxInt
- if x4.Aux != s {
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
continue
}
s0 := o3_1
- if s0.Op != OpPPC64SLDconst || s0.AuxInt != 32 {
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 32 {
continue
}
x3 := s0.Args[0]
if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr {
continue
}
- i0 := x3_0.AuxInt
- if x3_0.Aux != s || p != x3_0.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)) {
+ i0 := auxIntToInt32(x3_0.AuxInt)
+ if auxToSym(x3_0.Aux) != s || p != x3_0.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)) {
continue
}
b = mergePoint(b, x3, x4, x5, x6, x7)
v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t)
v.copyOf(v0)
v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
// match: (ORN x (MOVDconst [-1]))
// result: x
for {
x := v_0
- if v_1.Op != OpPPC64MOVDconst || v_1.AuxInt != -1 {
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
break
}
v.copyOf(x)
if v_0.Op != OpPPC64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpPPC64MOVDconst {
break
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = c | ^d
+ v.AuxInt = int64ToAuxInt(c | ^d)
return true
}
return false
// match: (ORconst [c] (ORconst [d] x))
// result: (ORconst [c|d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64ORconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpPPC64ORconst)
- v.AuxInt = c | d
+ v.AuxInt = int64ToAuxInt(c | d)
v.AddArg(x)
return true
}
// match: (ORconst [-1] _)
// result: (MOVDconst [-1])
for {
- if v.AuxInt != -1 {
+ if auxIntToInt64(v.AuxInt) != -1 {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
// match: (ORconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64ROTLconst)
- v.AuxInt = c & 63
+ v.AuxInt = int64ToAuxInt(c & 63)
v.AddArg(x)
return true
}
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64ROTLWconst)
- v.AuxInt = c & 31
+ v.AuxInt = int64ToAuxInt(c & 31)
v.AddArg(x)
return true
}
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64SLDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64SLWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64SRADconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64SRDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(is32Bit(-c)) {
break
}
v.reset(OpPPC64ADDconst)
- v.AuxInt = -c
+ v.AuxInt = int64ToAuxInt(-c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SLDconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpPPC64SRDconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 64-c) {
continue
}
v.reset(OpPPC64ROTLconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpPPC64SLWconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpPPC64SRWconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 32-c) {
continue
}
v.reset(OpPPC64ROTLWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || v_0_1.AuxInt != 63 {
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
}
_ = v_1_1.Args[1]
v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 64 {
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 {
continue
}
v_1_1_1 := v_1_1.Args[1]
- if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 63 || y != v_1_1_1.Args[0] {
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 63 || y != v_1_1_1.Args[0] {
continue
}
v.reset(OpPPC64ROTL)
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || v_0_1.AuxInt != 31 {
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
}
_ = v_1_1.Args[1]
v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpPPC64MOVDconst || v_1_1_0.AuxInt != 32 {
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 {
continue
}
v_1_1_1 := v_1_1.Args[1]
- if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || v_1_1_1.AuxInt != 31 || y != v_1_1_1.Args[0] {
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 31 || y != v_1_1_1.Args[0] {
continue
}
v.reset(OpPPC64ROTLW)
if v_0.Op != OpPPC64MOVDconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpPPC64MOVDconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64MOVDconst)
- v.AuxInt = c ^ d
+ v.AuxInt = int64ToAuxInt(c ^ d)
return true
}
break
if v_1.Op != OpPPC64MOVDconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(isU32Bit(c)) {
continue
}
v.reset(OpPPC64XORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (XORconst [c] (XORconst [d] x))
// result: (XORconst [c^d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64XORconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpPPC64XORconst)
- v.AuxInt = c ^ d
+ v.AuxInt = int64ToAuxInt(c ^ d)
v.AddArg(x)
return true
}
// match: (XORconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
x := v_0
// cond: boundsABI(kind) == 0
// result: (LoweredPanicBoundsA [kind] x y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
mem := v_2
break
}
v.reset(OpPPC64LoweredPanicBoundsA)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg3(x, y, mem)
return true
}
// cond: boundsABI(kind) == 1
// result: (LoweredPanicBoundsB [kind] x y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
mem := v_2
break
}
v.reset(OpPPC64LoweredPanicBoundsB)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg3(x, y, mem)
return true
}
// cond: boundsABI(kind) == 2
// result: (LoweredPanicBoundsC [kind] x y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
mem := v_2
if !(boundsABI(kind) == 2) {
break
}
v.reset(OpPPC64LoweredPanicBoundsC)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg3(x, y, mem)
return true
}
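The three PanicBounds rules differ only in which lowered op they choose: boundsABI buckets the panic kind so each op's arguments arrive in a distinct register pair. A hypothetical sketch of that dispatch shape (boundsABIStub merely stands in for the real boundsABI; its actual bucketing is not shown here):

	package main

	import "fmt"

	// boundsABIStub is a stand-in for the real boundsABI helper; only the
	// three-way split matters to the rules above, not the bucketing itself.
	func boundsABIStub(kind int64) int64 { return kind % 3 }

	func loweredPanicOp(kind int64) string {
		switch boundsABIStub(kind) {
		case 0:
			return "LoweredPanicBoundsA"
		case 1:
			return "LoweredPanicBoundsB"
		default:
			return "LoweredPanicBoundsC"
		}
	}

	func main() { fmt.Println(loweredPanicOp(4)) } // LoweredPanicBoundsB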
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpOr16)
v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = c & 15
+ v1.AuxInt = int64ToAuxInt(c & 15)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = -c & 15
+ v3.AuxInt = int64ToAuxInt(-c & 15)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
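With no 16-bit rotate instruction, the RotateLeft16 expansion above spells the rotation as two shifts and an or; the same computation in plain Go:

	package main

	import "fmt"

	// rotl16 mirrors (Or16 (Lsh16x64 x [c&15]) (Rsh16Ux64 x [-c&15])):
	// the two masked shift amounts always sum to 16 (mod 16).
	func rotl16(x uint16, c int64) uint16 {
		return x<<(uint64(c)&15) | x>>(uint64(-c)&15)
	}

	func main() {
		fmt.Printf("%#04x\n", rotl16(0x1234, 4)) // 0x2341
	}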
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64ROTLWconst)
- v.AuxInt = c & 31
+ v.AuxInt = int64ToAuxInt(c & 31)
v.AddArg(x)
return true
}
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64ROTLconst)
- v.AuxInt = c & 63
+ v.AuxInt = int64ToAuxInt(c & 63)
v.AddArg(x)
return true
}
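Folding the amount to [c&31] and [c&63] in the two rules above is safe because rotation is periodic in the word size; only the low bits of a constant amount matter:

	package main

	import (
		"fmt"
		"math/bits"
	)

	// Rotating a 64-bit word by c and by c&63 is the same operation, which is
	// why any constant amount collapses into a single ROTLconst.
	func main() {
		x, c := uint64(0xdeadbeefcafef00d), 131 // 131 & 63 == 3
		fmt.Println(bits.RotateLeft64(x, c) == bits.RotateLeft64(x, c&63)) // true
	}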
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpOr8)
v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = c & 7
+ v1.AuxInt = int64ToAuxInt(c & 7)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = -c & 7
+ v3.AuxInt = int64ToAuxInt(-c & 7)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v5.AuxInt = 16
+ v5.AuxInt = int64ToAuxInt(16)
v3.AddArg2(v4, v5)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 16) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 16
+ v4.AuxInt = int64ToAuxInt(16)
v3.AddArg2(y, v4)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 16) {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
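The constant-0 result above is just Go's shift semantics: an unsigned 16-bit value shifted right by 16 or more has no bits left, so the whole expression is the constant the rule materializes:

	package main

	import "fmt"

	// Go defines over-wide shifts; for unsigned operands every bit is shifted
	// out, matching the MOVDconst [0] the rewrite produces.
	func main() {
		x := uint16(0xffff)
		fmt.Println(x >> 16) // 0
	}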
// match: (Rsh16Ux64 x (MOVDconst [c]))
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) < 16) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 16
+ v4.AuxInt = int64ToAuxInt(16)
v3.AddArg2(y, v4)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v5.AuxInt = 16
+ v5.AuxInt = int64ToAuxInt(16)
v3.AddArg2(v4, v5)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v5.AuxInt = 16
+ v5.AuxInt = int64ToAuxInt(16)
v3.AddArg2(v4, v5)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 16) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 16
+ v4.AuxInt = int64ToAuxInt(16)
v3.AddArg2(y, v4)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 16) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = 63
+ v.AuxInt = int64ToAuxInt(63)
v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) < 16) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 16
+ v4.AuxInt = int64ToAuxInt(16)
v3.AddArg2(y, v4)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v5.AuxInt = 16
+ v5.AuxInt = int64ToAuxInt(16)
v3.AddArg2(v4, v5)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
y := v_1
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 32
+ v4.AuxInt = int64ToAuxInt(32)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 32) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
y := v_1
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 32
+ v3.AuxInt = int64ToAuxInt(32)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 32) {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (Rsh32Ux64 x (MOVDconst [c]))
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) < 32) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
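The ISEL expansions in this family all use one idiom: compare the shift amount against the width and substitute -1 when it is too large, so the hardware shift (which masks its amount to six bits) produces the 0 Go requires. A sketch of the 32-bit case under those assumptions:

	package main

	import "fmt"

	// srw models (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))):
	// an amount >= 32 becomes -1, which masks to 63 and shifts every bit of the
	// zero-extended 32-bit value out.
	func srw(x uint32, amt int64) uint32 {
		if uint64(amt) >= 32 { // the CMPU + ISEL [0] selection
			amt = -1
		}
		return uint32(uint64(x) >> (uint64(amt) & 63)) // hardware masks to 6 bits
	}

	func main() {
		fmt.Println(srw(0xffffffff, 3))  // 536870911 (0x1fffffff)
		fmt.Println(srw(0xffffffff, 40)) // 0
	}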
v_1_1 := v_1.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
y := v_1_0
- if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 31 {
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 31 {
continue
}
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32)
- v0.AuxInt = 31
+ v0.AuxInt = int64ToAuxInt(31)
v0.AddArg(y)
v.AddArg2(x, v0)
return true
// result: (SRW x (ANDconst <typ.UInt> [31] y))
for {
x := v_0
- if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || v_1.AuxInt != 31 {
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 31 {
break
}
y := v_1.Args[0]
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
- v0.AuxInt = 31
+ v0.AuxInt = int64ToAuxInt(31)
v0.AddArg(y)
v.AddArg2(x, v0)
return true
}
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 32 {
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 {
break
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || v_1_1.AuxInt != 31 {
+ if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 31 {
break
}
y := v_1_1.Args[0]
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = 32
+ v1.AuxInt = int64ToAuxInt(32)
v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
- v2.AuxInt = 31
+ v2.AuxInt = int64ToAuxInt(31)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v.AddArg2(x, v0)
}
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 32 {
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 {
break
}
v_1_1 := v_1.Args[1]
v_1_1_1 := v_1_1.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 {
y := v_1_1_0
- if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 31 {
+ if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 31 {
continue
}
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = 32
+ v1.AuxInt = int64ToAuxInt(32)
v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
- v2.AuxInt = 31
+ v2.AuxInt = int64ToAuxInt(31)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 32
+ v3.AuxInt = int64ToAuxInt(32)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 32
+ v4.AuxInt = int64ToAuxInt(32)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 32
+ v4.AuxInt = int64ToAuxInt(32)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 32) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
y := v_1
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 32
+ v3.AuxInt = int64ToAuxInt(32)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 32) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = 63
+ v.AuxInt = int64ToAuxInt(63)
v.AddArg(x)
return true
}
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) < 32) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
v_1_1 := v_1.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
y := v_1_0
- if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 31 {
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 31 {
continue
}
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32)
- v0.AuxInt = 31
+ v0.AuxInt = int64ToAuxInt(31)
v0.AddArg(y)
v.AddArg2(x, v0)
return true
// result: (SRAW x (ANDconst <typ.UInt> [31] y))
for {
x := v_0
- if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || v_1.AuxInt != 31 {
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 31 {
break
}
y := v_1.Args[0]
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
- v0.AuxInt = 31
+ v0.AuxInt = int64ToAuxInt(31)
v0.AddArg(y)
v.AddArg2(x, v0)
return true
}
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 32 {
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 {
break
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || v_1_1.AuxInt != 31 {
+ if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 31 {
break
}
y := v_1_1.Args[0]
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = 32
+ v1.AuxInt = int64ToAuxInt(32)
v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
- v2.AuxInt = 31
+ v2.AuxInt = int64ToAuxInt(31)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v.AddArg2(x, v0)
}
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 32 {
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 {
break
}
v_1_1 := v_1.Args[1]
v_1_1_1 := v_1_1.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 {
y := v_1_1_0
- if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 31 {
+ if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 31 {
continue
}
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = 32
+ v1.AuxInt = int64ToAuxInt(32)
v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
- v2.AuxInt = 31
+ v2.AuxInt = int64ToAuxInt(31)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 32
+ v3.AuxInt = int64ToAuxInt(32)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 32
+ v4.AuxInt = int64ToAuxInt(32)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SRD)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 64
+ v4.AuxInt = int64ToAuxInt(64)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 64) {
break
}
v.reset(OpPPC64SRDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
y := v_1
v.reset(OpPPC64SRD)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 64) {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (Rsh64Ux64 x (MOVDconst [c]))
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) < 64) {
break
}
v.reset(OpPPC64SRDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
v_1_1 := v_1.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
y := v_1_0
- if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 63 {
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 63 {
continue
}
v.reset(OpPPC64SRD)
v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64)
- v0.AuxInt = 63
+ v0.AuxInt = int64ToAuxInt(63)
v0.AddArg(y)
v.AddArg2(x, v0)
return true
// result: (SRD x (ANDconst <typ.UInt> [63] y))
for {
x := v_0
- if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || v_1.AuxInt != 63 {
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 63 {
break
}
y := v_1.Args[0]
v.reset(OpPPC64SRD)
v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
- v0.AuxInt = 63
+ v0.AuxInt = int64ToAuxInt(63)
v0.AddArg(y)
v.AddArg2(x, v0)
return true
}
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 64 {
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 {
break
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || v_1_1.AuxInt != 63 {
+ if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 63 {
break
}
y := v_1_1.Args[0]
v.reset(OpPPC64SRD)
v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = 64
+ v1.AuxInt = int64ToAuxInt(64)
v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
- v2.AuxInt = 63
+ v2.AuxInt = int64ToAuxInt(63)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v.AddArg2(x, v0)
}
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 64 {
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 {
break
}
v_1_1 := v_1.Args[1]
v_1_1_1 := v_1_1.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 {
y := v_1_1_0
- if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 63 {
+ if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 63 {
continue
}
v.reset(OpPPC64SRD)
v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = 64
+ v1.AuxInt = int64ToAuxInt(64)
v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
- v2.AuxInt = 63
+ v2.AuxInt = int64ToAuxInt(63)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SRD)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SRD)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 64
+ v4.AuxInt = int64ToAuxInt(64)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SRAD)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 64
+ v4.AuxInt = int64ToAuxInt(64)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 64) {
break
}
v.reset(OpPPC64SRADconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
y := v_1
v.reset(OpPPC64SRAD)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 64) {
break
}
v.reset(OpPPC64SRADconst)
- v.AuxInt = 63
+ v.AuxInt = int64ToAuxInt(63)
v.AddArg(x)
return true
}
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) < 64) {
break
}
v.reset(OpPPC64SRADconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
v_1_1 := v_1.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
y := v_1_0
- if v_1_1.Op != OpPPC64MOVDconst || v_1_1.AuxInt != 63 {
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 63 {
continue
}
v.reset(OpPPC64SRAD)
v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64)
- v0.AuxInt = 63
+ v0.AuxInt = int64ToAuxInt(63)
v0.AddArg(y)
v.AddArg2(x, v0)
return true
// result: (SRAD x (ANDconst <typ.UInt> [63] y))
for {
x := v_0
- if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || v_1.AuxInt != 63 {
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 63 {
break
}
y := v_1.Args[0]
v.reset(OpPPC64SRAD)
v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
- v0.AuxInt = 63
+ v0.AuxInt = int64ToAuxInt(63)
v0.AddArg(y)
v.AddArg2(x, v0)
return true
}
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 64 {
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 {
break
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || v_1_1.AuxInt != 63 {
+ if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 63 {
break
}
y := v_1_1.Args[0]
v.reset(OpPPC64SRAD)
v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = 64
+ v1.AuxInt = int64ToAuxInt(64)
v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
- v2.AuxInt = 63
+ v2.AuxInt = int64ToAuxInt(63)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v.AddArg2(x, v0)
}
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpPPC64MOVDconst || v_1_0.AuxInt != 64 {
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 {
break
}
v_1_1 := v_1.Args[1]
v_1_1_1 := v_1_1.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 {
y := v_1_1_0
- if v_1_1_1.Op != OpPPC64MOVDconst || v_1_1_1.AuxInt != 63 {
+ if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 63 {
continue
}
v.reset(OpPPC64SRAD)
v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = 64
+ v1.AuxInt = int64ToAuxInt(64)
v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
- v2.AuxInt = 63
+ v2.AuxInt = int64ToAuxInt(63)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SRAD)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v2.AddArg2(y, v3)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
y := v_1
v.reset(OpPPC64SRAD)
v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 64
+ v4.AuxInt = int64ToAuxInt(64)
v2.AddArg2(v3, v4)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v5.AuxInt = 8
+ v5.AuxInt = int64ToAuxInt(8)
v3.AddArg2(v4, v5)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 8) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 8
+ v4.AuxInt = int64ToAuxInt(8)
v3.AddArg2(y, v4)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 8) {
break
}
v.reset(OpPPC64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (Rsh8Ux64 x (MOVDconst [c]))
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) < 8) {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 8
+ v4.AuxInt = int64ToAuxInt(8)
v3.AddArg2(y, v4)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v5.AuxInt = 8
+ v5.AuxInt = int64ToAuxInt(8)
v3.AddArg2(v4, v5)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v5.AuxInt = 8
+ v5.AuxInt = int64ToAuxInt(8)
v3.AddArg2(v4, v5)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 8) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 8
+ v4.AuxInt = int64ToAuxInt(8)
v3.AddArg2(y, v4)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 8) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = 63
+ v.AuxInt = int64ToAuxInt(63)
v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) < 8) {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v4.AuxInt = 8
+ v4.AuxInt = int64ToAuxInt(8)
v3.AddArg2(y, v4)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
- v5.AuxInt = 8
+ v5.AuxInt = int64ToAuxInt(8)
v3.AddArg2(v4, v5)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
t := v.Type
x := v_0
v.reset(OpPPC64SRADconst)
- v.AuxInt = 63
+ v.AuxInt = int64ToAuxInt(63)
v0 := b.NewValue0(v.Pos, OpPPC64NEG, t)
v0.AddArg(x)
v.AddArg(v0)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpPPC64FMOVDstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 8 && is32BitFloat(val.Type)
+ // cond: t.Size() == 8 && is32BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 8 && is32BitFloat(val.Type)) {
+ if !(t.Size() == 8 && is32BitFloat(val.Type)) {
break
}
v.reset(OpPPC64FMOVDstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
// result: (FMOVSstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpPPC64FMOVSstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))
+ // cond: t.Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))
// result: (MOVDstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))) {
+ if !(t.Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))) {
break
}
v.reset(OpPPC64MOVDstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 4 && is32BitInt(val.Type)
+ // cond: t.Size() == 4 && is32BitInt(val.Type)
// result: (MOVWstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 4 && is32BitInt(val.Type)) {
+ if !(t.Size() == 4 && is32BitInt(val.Type)) {
break
}
v.reset(OpPPC64MOVWstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 2
+ // cond: t.Size() == 2
// result: (MOVHstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 2) {
+ if !(t.Size() == 2) {
break
}
v.reset(OpPPC64MOVHstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 1
+ // cond: t.Size() == 1
// result: (MOVBstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 1) {
+ if !(t.Size() == 1) {
break
}
v.reset(OpPPC64MOVBstore)
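Two independent cleanups run through the Store rules above: auxToType performs the *types.Type assertion once, so every condition reads t.Size() directly, and the size and kind tests then pick the narrowest store op. A sketch of that dispatch (op names and sizes read off the rules; the types.Type plumbing is elided):

	package main

	import "fmt"

	// storeOp mirrors the Store rules: an 8-byte slot holding either float
	// width takes FMOVDstore (note the is32BitFloat case above), and integer
	// or pointer stores narrow by size down to MOVBstore.
	func storeOp(size int64, isFloat bool) string {
		switch {
		case size == 8 && isFloat:
			return "FMOVDstore"
		case size == 4 && isFloat:
			return "FMOVSstore"
		case size == 8:
			return "MOVDstore"
		case size == 4:
			return "MOVWstore"
		case size == 2:
			return "MOVHstore"
		default: // size == 1
			return "MOVBstore"
		}
	}

	func main() {
		fmt.Println(storeOp(8, true))  // FMOVDstore
		fmt.Println(storeOp(2, false)) // MOVHstore
	}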
// match: (Zero [0] _ mem)
// result: mem
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
mem := v_1
// match: (Zero [1] destptr mem)
// result: (MOVBstorezero destptr mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt64(v.AuxInt) != 1 {
break
}
destptr := v_0
// match: (Zero [2] destptr mem)
// result: (MOVHstorezero destptr mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
destptr := v_0
// match: (Zero [3] destptr mem)
// result: (MOVBstorezero [2] destptr (MOVHstorezero destptr mem))
for {
- if v.AuxInt != 3 {
+ if auxIntToInt64(v.AuxInt) != 3 {
break
}
destptr := v_0
mem := v_1
v.reset(OpPPC64MOVBstorezero)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem)
v0.AddArg2(destptr, mem)
v.AddArg2(destptr, v0)
// match: (Zero [4] destptr mem)
// result: (MOVWstorezero destptr mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
destptr := v_0
// match: (Zero [5] destptr mem)
// result: (MOVBstorezero [4] destptr (MOVWstorezero destptr mem))
for {
- if v.AuxInt != 5 {
+ if auxIntToInt64(v.AuxInt) != 5 {
break
}
destptr := v_0
mem := v_1
v.reset(OpPPC64MOVBstorezero)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
v0.AddArg2(destptr, mem)
v.AddArg2(destptr, v0)
// match: (Zero [6] destptr mem)
// result: (MOVHstorezero [4] destptr (MOVWstorezero destptr mem))
for {
- if v.AuxInt != 6 {
+ if auxIntToInt64(v.AuxInt) != 6 {
break
}
destptr := v_0
mem := v_1
v.reset(OpPPC64MOVHstorezero)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
v0.AddArg2(destptr, mem)
v.AddArg2(destptr, v0)
// match: (Zero [7] destptr mem)
// result: (MOVBstorezero [6] destptr (MOVHstorezero [4] destptr (MOVWstorezero destptr mem)))
for {
- if v.AuxInt != 7 {
+ if auxIntToInt64(v.AuxInt) != 7 {
break
}
destptr := v_0
mem := v_1
v.reset(OpPPC64MOVBstorezero)
- v.AuxInt = 6
+ v.AuxInt = int32ToAuxInt(6)
v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem)
- v0.AuxInt = 4
+ v0.AuxInt = int32ToAuxInt(4)
v1 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
v1.AddArg2(destptr, mem)
v0.AddArg2(destptr, v1)
return true
}
// match: (Zero [8] {t} destptr mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVDstorezero destptr mem)
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
destptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
// match: (Zero [8] destptr mem)
// result: (MOVWstorezero [4] destptr (MOVWstorezero [0] destptr mem))
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
destptr := v_0
mem := v_1
v.reset(OpPPC64MOVWstorezero)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v0.AddArg2(destptr, mem)
v.AddArg2(destptr, v0)
return true
}
// match: (Zero [12] {t} destptr mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem))
for {
- if v.AuxInt != 12 {
+ if auxIntToInt64(v.AuxInt) != 12 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
destptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpPPC64MOVWstorezero)
- v.AuxInt = 8
+ v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v0.AddArg2(destptr, mem)
v.AddArg2(destptr, v0)
return true
}
// match: (Zero [16] {t} destptr mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))
for {
- if v.AuxInt != 16 {
+ if auxIntToInt64(v.AuxInt) != 16 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
destptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
- v.AuxInt = 8
+ v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v0.AddArg2(destptr, mem)
v.AddArg2(destptr, v0)
return true
}
// match: (Zero [24] {t} destptr mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))
for {
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
destptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
- v.AuxInt = 16
+ v.AuxInt = int32ToAuxInt(16)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
- v0.AuxInt = 8
+ v0.AuxInt = int32ToAuxInt(8)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v1.AddArg2(destptr, mem)
v0.AddArg2(destptr, v1)
v.AddArg2(destptr, v0)
return true
}
// match: (Zero [32] {t} destptr mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))))
for {
- if v.AuxInt != 32 {
+ if auxIntToInt64(v.AuxInt) != 32 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
destptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
- v.AuxInt = 24
+ v.AuxInt = int32ToAuxInt(24)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
- v0.AuxInt = 16
+ v0.AuxInt = int32ToAuxInt(16)
v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
- v1.AuxInt = 8
+ v1.AuxInt = int32ToAuxInt(8)
v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v2.AddArg2(destptr, mem)
v1.AddArg2(destptr, v2)
v0.AddArg2(destptr, v1)
// cond: objabi.GOPPC64 <= 8 && s < 64
// result: (LoweredZeroShort [s] ptr mem)
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
ptr := v_0
mem := v_1
if !(objabi.GOPPC64 <= 8 && s < 64) {
break
}
v.reset(OpPPC64LoweredZeroShort)
- v.AuxInt = s
+ v.AuxInt = int64ToAuxInt(s)
v.AddArg2(ptr, mem)
return true
}
// cond: objabi.GOPPC64 <= 8
// result: (LoweredZero [s] ptr mem)
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
ptr := v_0
mem := v_1
if !(objabi.GOPPC64 <= 8) {
break
}
v.reset(OpPPC64LoweredZero)
- v.AuxInt = s
+ v.AuxInt = int64ToAuxInt(s)
v.AddArg2(ptr, mem)
return true
}
// cond: s < 128 && objabi.GOPPC64 >= 9
// result: (LoweredQuadZeroShort [s] ptr mem)
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
ptr := v_0
mem := v_1
if !(s < 128 && objabi.GOPPC64 >= 9) {
break
}
v.reset(OpPPC64LoweredQuadZeroShort)
- v.AuxInt = s
+ v.AuxInt = int64ToAuxInt(s)
v.AddArg2(ptr, mem)
return true
}
// cond: objabi.GOPPC64 >= 9
// result: (LoweredQuadZero [s] ptr mem)
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
ptr := v_0
mem := v_1
if !(objabi.GOPPC64 >= 9) {
break
}
v.reset(OpPPC64LoweredQuadZero)
- v.AuxInt = s
+ v.AuxInt = int64ToAuxInt(s)
v.AddArg2(ptr, mem)
return true
}
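The constant sizes above decompose into the widest stores that fit (alignment permitting), and anything larger falls through to the Lowered*Zero loops gated on objabi.GOPPC64. The Zero [7] case, spelled out in plain Go:

	package main

	import "fmt"

	// zero7 mirrors (MOVBstorezero [6] destptr (MOVHstorezero [4] destptr
	// (MOVWstorezero destptr mem))): a 4-byte, a 2-byte, and a 1-byte store.
	func zero7(p []byte) {
		_ = p[6]                            // bounds hint
		p[0], p[1], p[2], p[3] = 0, 0, 0, 0 // MOVWstorezero destptr
		p[4], p[5] = 0, 0                   // MOVHstorezero [4] destptr
		p[6] = 0                            // MOVBstorezero [6] destptr
	}

	func main() {
		buf := []byte{1, 2, 3, 4, 5, 6, 7}
		zero7(buf)
		fmt.Println(buf) // [0 0 0 0 0 0 0]
	}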
// result: (EQ (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64EQ, v0)
return true
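Every block rewrite in this family removes a compare against zero by switching to a recording instruction: andi. (ANDCCconst) sets condition-register field 0 from its own result, so the branch tests it directly. The predicate is unchanged:

	package main

	import "fmt"

	// branchOnAnd is the predicate (EQ (CMPconst [0] (ANDconst [c] x))) decides;
	// (EQ (ANDCCconst [c] x)) decides exactly the same thing in one instruction.
	func branchOnAnd(x, c int64) bool {
		return x&c == 0
	}

	func main() {
		fmt.Println(branchOnAnd(0b1010, 0b0101)) // true: no common bits
		fmt.Println(branchOnAnd(0b1010, 0b0010)) // false
	}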
// result: (EQ (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64EQ, v0)
return true
// result: (EQ (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64EQ, v0)
return true
// result: (EQ (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64EQ, v0)
return true
// result: (EQ (ANDCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (EQ (ORCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (EQ (XORCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (GE (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64GE, v0)
return true
// result: (GE (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64GE, v0)
return true
// result: (GE (ANDCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (GE (ORCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (GE (XORCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (GT (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64GT, v0)
return true
// result: (GT (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64GT, v0)
return true
// result: (GT (ANDCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (GT (ORCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (GT (XORCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
for {
cond := b.Controls[0]
v0 := b.NewValue0(cond.Pos, OpPPC64CMPWconst, types.TypeFlags)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v0.AddArg(cond)
b.resetWithControl(BlockPPC64NE, v0)
return true
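The If lowering above works because a Go bool is materialized as 0 or 1: branching on cond is a 32-bit compare against 0 followed by an NE branch, which is exactly what the rewrite spells out:

	package main

	import "fmt"

	// ifLowered models (If cond yes no) => (NE (CMPWconst [0] cond) yes no),
	// with cond already materialized as a 0/1 integer.
	func ifLowered(cond int32) string {
		if cond != 0 { // CMPWconst [0] cond; take the NE edge
			return "yes"
		}
		return "no"
	}

	func main() { fmt.Println(ifLowered(1), ifLowered(0)) } // yes no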
// result: (LE (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64LE, v0)
return true
// result: (LE (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64LE, v0)
return true
// result: (LE (ANDCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (LE (ORCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (LE (XORCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (LT (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64LT, v0)
return true
// result: (LT (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64LT, v0)
return true
// result: (LT (ANDCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (LT (ORCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (LT (XORCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (EQ cc yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
// result: (NE cc yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
// result: (LT cc yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
// result: (LE cc yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
// result: (GT cc yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
// result: (GE cc yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
// result: (FLT cc yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
// result: (FLE cc yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
// result: (FGT cc yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
// result: (FGE cc yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
// result: (NE (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64NE, v0)
return true
// result: (NE (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64NE, v0)
return true
// result: (NE (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64NE, v0)
return true
// result: (NE (ANDCCconst [c] x) yes no)
for b.Controls[0].Op == OpPPC64CMPWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_0.Args[0]
v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(x)
b.resetWithControl(BlockPPC64NE, v0)
return true
// result: (NE (ANDCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (NE (ORCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]
// result: (NE (XORCC x y) yes no)
for b.Controls[0].Op == OpPPC64CMPconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
z := v_0.Args[0]