// * Avoid using Neq32 for writeBarrier.enabled checks.
// Lowering arithmetic
-(Add64 ...) -> (ADD ...)
-(AddPtr ...) -> (ADD ...)
-(Add32 ...) -> (ADD ...)
-(Add16 ...) -> (ADD ...)
-(Add8 ...) -> (ADD ...)
-(Add32F ...) -> (FADDS ...)
-(Add64F ...) -> (FADDD ...)
-
-(Sub64 ...) -> (SUB ...)
-(SubPtr ...) -> (SUB ...)
-(Sub32 ...) -> (SUB ...)
-(Sub16 ...) -> (SUB ...)
-(Sub8 ...) -> (SUB ...)
-(Sub32F ...) -> (FSUBS ...)
-(Sub64F ...) -> (FSUBD ...)
-
-(Mul64 ...) -> (MUL ...)
-(Mul32 ...) -> (MULW ...)
-(Mul16 x y) -> (MULW (SignExt16to32 x) (SignExt16to32 y))
-(Mul8 x y) -> (MULW (SignExt8to32 x) (SignExt8to32 y))
-(Mul32F ...) -> (FMULS ...)
-(Mul64F ...) -> (FMULD ...)
-
-(Div32F ...) -> (FDIVS ...)
-(Div64F ...) -> (FDIVD ...)
-
-(Div64 ...) -> (DIV ...)
-(Div64u ...) -> (DIVU ...)
-(Div32 ...) -> (DIVW ...)
-(Div32u ...) -> (DIVUW ...)
-(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y))
-(Div16u x y) -> (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
-(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y))
-(Div8u x y) -> (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
-
-(Hmul64 ...) -> (MULH ...)
-(Hmul64u ...) -> (MULHU ...)
-(Hmul32 x y) -> (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
-(Hmul32u x y) -> (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
-
-// (x + y) / 2 -> (x / 2) + (y / 2) + (x & y & 1)
-(Avg64u <t> x y) -> (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
-
-(Mod64 ...) -> (REM ...)
-(Mod64u ...) -> (REMU ...)
-(Mod32 ...) -> (REMW ...)
-(Mod32u ...) -> (REMUW ...)
-(Mod16 x y) -> (REMW (SignExt16to32 x) (SignExt16to32 y))
-(Mod16u x y) -> (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
-(Mod8 x y) -> (REMW (SignExt8to32 x) (SignExt8to32 y))
-(Mod8u x y) -> (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
-
-(And64 ...) -> (AND ...)
-(And32 ...) -> (AND ...)
-(And16 ...) -> (AND ...)
-(And8 ...) -> (AND ...)
-
-(Or64 ...) -> (OR ...)
-(Or32 ...) -> (OR ...)
-(Or16 ...) -> (OR ...)
-(Or8 ...) -> (OR ...)
-
-(Xor64 ...) -> (XOR ...)
-(Xor32 ...) -> (XOR ...)
-(Xor16 ...) -> (XOR ...)
-(Xor8 ...) -> (XOR ...)
-
-(Neg64 ...) -> (NEG ...)
-(Neg32 ...) -> (NEG ...)
-(Neg16 ...) -> (NEG ...)
-(Neg8 ...) -> (NEG ...)
-(Neg32F ...) -> (FNEGS ...)
-(Neg64F ...) -> (FNEGD ...)
-
-(Com64 ...) -> (NOT ...)
-(Com32 ...) -> (NOT ...)
-(Com16 ...) -> (NOT ...)
-(Com8 ...) -> (NOT ...)
-
-(Sqrt ...) -> (FSQRTD ...)
+(Add64 ...) => (ADD ...)
+(AddPtr ...) => (ADD ...)
+(Add32 ...) => (ADD ...)
+(Add16 ...) => (ADD ...)
+(Add8 ...) => (ADD ...)
+(Add32F ...) => (FADDS ...)
+(Add64F ...) => (FADDD ...)
+
+(Sub64 ...) => (SUB ...)
+(SubPtr ...) => (SUB ...)
+(Sub32 ...) => (SUB ...)
+(Sub16 ...) => (SUB ...)
+(Sub8 ...) => (SUB ...)
+(Sub32F ...) => (FSUBS ...)
+(Sub64F ...) => (FSUBD ...)
+
+(Mul64 ...) => (MUL ...)
+(Mul32 ...) => (MULW ...)
+(Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
+(Mul8 x y) => (MULW (SignExt8to32 x) (SignExt8to32 y))
+(Mul32F ...) => (FMULS ...)
+(Mul64F ...) => (FMULD ...)
+
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIVD ...)
+
+(Div64 x y [false]) => (DIV x y)
+(Div64u ...) => (DIVU ...)
+(Div32 x y [false]) => (DIVW x y)
+(Div32u ...) => (DIVUW ...)
+(Div16 x y [false]) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Hmul64 ...) => (MULH ...)
+(Hmul64u ...) => (MULHU ...)
+(Hmul32 x y) => (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
+(Hmul32u x y) => (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
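
For intuition, the Hmul32 lowering can be mirrored in plain Go (an illustrative sketch, not part of this change):

	package main

	import "fmt"

	func main() {
		// Hmul32 as lowered above: sign-extend both operands to 64 bits,
		// multiply, then keep the high 32 bits of the product (SRAI [32]).
		hmul32 := func(x, y int32) int32 {
			return int32(int64(x) * int64(y) >> 32)
		}
		fmt.Println(hmul32(1<<30, 4)) // 1, since (2^30 * 4) >> 32 == 1
	}
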
+
+// (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
+(Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
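
The identity avoids computing x+y, which could overflow: halving each operand first cannot overflow, and the final term restores the carry bit that both halvings dropped. A quick check in plain Go (illustrative, not part of this change), compared against a full 65-bit sum built with math/bits:

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		// Avg64u as lowered above: halve each operand, then add back
		// the low bit that both halvings dropped (x & y & 1).
		avg := func(x, y uint64) uint64 {
			return x>>1 + y>>1 + x&y&1
		}
		x, y := uint64(1)<<63+5, uint64(1)<<63+9 // x+y overflows uint64
		sum, carry := bits.Add64(x, y, 0)        // 65-bit reference sum
		fmt.Println(avg(x, y) == sum>>1|carry<<63) // true
	}
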
+
+(Mod64 x y [false]) => (REM x y)
+(Mod64u ...) => (REMU ...)
+(Mod32 x y [false]) => (REMW x y)
+(Mod32u ...) => (REMUW ...)
+(Mod16 x y [false]) => (REMW (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(And64 ...) => (AND ...)
+(And32 ...) => (AND ...)
+(And16 ...) => (AND ...)
+(And8 ...) => (AND ...)
+
+(Or64 ...) => (OR ...)
+(Or32 ...) => (OR ...)
+(Or16 ...) => (OR ...)
+(Or8 ...) => (OR ...)
+
+(Xor64 ...) => (XOR ...)
+(Xor32 ...) => (XOR ...)
+(Xor16 ...) => (XOR ...)
+(Xor8 ...) => (XOR ...)
+
+(Neg64 ...) => (NEG ...)
+(Neg32 ...) => (NEG ...)
+(Neg16 ...) => (NEG ...)
+(Neg8 ...) => (NEG ...)
+(Neg32F ...) => (FNEGS ...)
+(Neg64F ...) => (FNEGD ...)
+
+(Com64 ...) => (NOT ...)
+(Com32 ...) => (NOT ...)
+(Com16 ...) => (NOT ...)
+(Com8 ...) => (NOT ...)
+
+(Sqrt ...) => (FSQRTD ...)
// Zero and sign extension
// Shift left until the bits we want are at the top of the register.
// Then shift right, arithmetically or logically, for sign or zero extension.
// We always extend to 64 bits; there's no reason not to,
// and optimization rules can then collapse some extensions.
-(SignExt8to16 <t> x) -> (SRAI [56] (SLLI <t> [56] x))
-(SignExt8to32 <t> x) -> (SRAI [56] (SLLI <t> [56] x))
-(SignExt8to64 <t> x) -> (SRAI [56] (SLLI <t> [56] x))
-(SignExt16to32 <t> x) -> (SRAI [48] (SLLI <t> [48] x))
-(SignExt16to64 <t> x) -> (SRAI [48] (SLLI <t> [48] x))
-(SignExt32to64 <t> x) -> (ADDIW [0] x)
+(SignExt8to16 <t> x) => (SRAI [56] (SLLI <t> [56] x))
+(SignExt8to32 <t> x) => (SRAI [56] (SLLI <t> [56] x))
+(SignExt8to64 <t> x) => (SRAI [56] (SLLI <t> [56] x))
+(SignExt16to32 <t> x) => (SRAI [48] (SLLI <t> [48] x))
+(SignExt16to64 <t> x) => (SRAI [48] (SLLI <t> [48] x))
+(SignExt32to64 <t> x) => (ADDIW [0] x)
-(ZeroExt8to16 <t> x) -> (SRLI [56] (SLLI <t> [56] x))
-(ZeroExt8to32 <t> x) -> (SRLI [56] (SLLI <t> [56] x))
-(ZeroExt8to64 <t> x) -> (SRLI [56] (SLLI <t> [56] x))
-(ZeroExt16to32 <t> x) -> (SRLI [48] (SLLI <t> [48] x))
-(ZeroExt16to64 <t> x) -> (SRLI [48] (SLLI <t> [48] x))
-(ZeroExt32to64 <t> x) -> (SRLI [32] (SLLI <t> [32] x))
+(ZeroExt8to16 <t> x) => (SRLI [56] (SLLI <t> [56] x))
+(ZeroExt8to32 <t> x) => (SRLI [56] (SLLI <t> [56] x))
+(ZeroExt8to64 <t> x) => (SRLI [56] (SLLI <t> [56] x))
+(ZeroExt16to32 <t> x) => (SRLI [48] (SLLI <t> [48] x))
+(ZeroExt16to64 <t> x) => (SRLI [48] (SLLI <t> [48] x))
+(ZeroExt32to64 <t> x) => (SRLI [32] (SLLI <t> [32] x))
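
Both extension forms use the same shift pair; only the final shift differs (arithmetic versus logical). In plain Go (illustrative, not part of this change):

	package main

	import "fmt"

	func main() {
		// SignExt8to64/ZeroExt8to64 as lowered above: move the byte to
		// the top of the register (SLLI [56]), then shift it back down
		// arithmetically (SRAI) or logically (SRLI).
		x := uint64(0xf0) // -16 when viewed as an int8
		signExt := uint64(int64(x<<56) >> 56)
		zeroExt := x << 56 >> 56
		fmt.Printf("%#x %#x\n", signExt, zeroExt) // 0xfffffffffffffff0 0xf0
	}
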
-(Cvt32to32F ...) -> (FCVTSW ...)
-(Cvt32to64F ...) -> (FCVTDW ...)
-(Cvt64to32F ...) -> (FCVTSL ...)
-(Cvt64to64F ...) -> (FCVTDL ...)
+(Cvt32to32F ...) => (FCVTSW ...)
+(Cvt32to64F ...) => (FCVTDW ...)
+(Cvt64to32F ...) => (FCVTSL ...)
+(Cvt64to64F ...) => (FCVTDL ...)
-(Cvt32Fto32 ...) -> (FCVTWS ...)
-(Cvt32Fto64 ...) -> (FCVTLS ...)
-(Cvt64Fto32 ...) -> (FCVTWD ...)
-(Cvt64Fto64 ...) -> (FCVTLD ...)
+(Cvt32Fto32 ...) => (FCVTWS ...)
+(Cvt32Fto64 ...) => (FCVTLS ...)
+(Cvt64Fto32 ...) => (FCVTWD ...)
+(Cvt64Fto64 ...) => (FCVTLD ...)
-(Cvt32Fto64F ...) -> (FCVTDS ...)
-(Cvt64Fto32F ...) -> (FCVTSD ...)
+(Cvt32Fto64F ...) => (FCVTDS ...)
+(Cvt64Fto32F ...) => (FCVTSD ...)
-(CvtBoolToUint8 ...) -> (Copy ...)
+(CvtBoolToUint8 ...) => (Copy ...)
-(Round32F ...) -> (Copy ...)
-(Round64F ...) -> (Copy ...)
+(Round32F ...) => (Copy ...)
+(Round64F ...) => (Copy ...)
// From genericOps.go:
// "0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0"
// For positive x, bit 63 of x-1 is always 0, so the result is -1.
// For zero x, bit 63 of x-1 is 1, so the result is 0.
//
-(Slicemask <t> x) -> (NOT (SRAI <t> [63] (ADDI <t> [-1] x)))
+(Slicemask <t> x) => (NOT (SRAI <t> [63] (ADDI <t> [-1] x)))
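
In plain Go terms the lowering is ^((x-1)>>63) with an arithmetic shift (illustrative, not part of this change):

	package main

	import "fmt"

	func main() {
		// Slicemask as lowered above: for the defined inputs, bit 63 of
		// x-1 is set only when x == 0, so the arithmetic shift smears it
		// into 0 or -1 and NOT flips that to -1 or 0.
		slicemask := func(x int64) int64 { return ^((x - 1) >> 63) }
		fmt.Println(slicemask(0), slicemask(5)) // 0 -1
	}
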
// Truncations
// We ignore the unused high parts of registers, so truncates are just copies.
-(Trunc16to8 ...) -> (Copy ...)
-(Trunc32to8 ...) -> (Copy ...)
-(Trunc32to16 ...) -> (Copy ...)
-(Trunc64to8 ...) -> (Copy ...)
-(Trunc64to16 ...) -> (Copy ...)
-(Trunc64to32 ...) -> (Copy ...)
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
// Shifts

// SLL only considers the bottom 6 bits of y. If y >= 64, the result should
// always be 0.
//
// If y < 64, this is the value we want. Otherwise, we want zero.
//
// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
-(Lsh8x8 <t> x y) -> (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Lsh8x16 <t> x y) -> (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Lsh8x32 <t> x y) -> (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Lsh8x64 <t> x y) -> (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
-(Lsh16x8 <t> x y) -> (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Lsh16x16 <t> x y) -> (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Lsh16x32 <t> x y) -> (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Lsh16x64 <t> x y) -> (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
-(Lsh32x8 <t> x y) -> (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Lsh32x16 <t> x y) -> (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Lsh32x32 <t> x y) -> (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Lsh32x64 <t> x y) -> (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
-(Lsh64x8 <t> x y) -> (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Lsh64x16 <t> x y) -> (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Lsh64x32 <t> x y) -> (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Lsh64x64 <t> x y) -> (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+(Lsh8x8 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh8x16 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh8x32 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh8x64 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Lsh16x8 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh16x16 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh16x32 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh16x64 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Lsh32x8 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh32x16 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh32x32 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh32x64 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Lsh64x8 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh64x16 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh64x32 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh64x64 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
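
To see why the AND is needed: Go defines a shift by 64 or more as 0, while SLL uses only the low 6 bits of y, so a bare SLL by 64 would return x unchanged. A model in plain Go (illustrative, not part of this change):

	package main

	import "fmt"

	func main() {
		// Model of the lowering above: the bare instruction shifts by
		// y&63; ANDing with -(y < 64) zeroes the result when y >= 64.
		x, y := uint64(0x1234), uint64(64)
		var mask uint64
		if y < 64 {
			mask = ^uint64(0) // SLTIU gives 1, Neg gives all ones
		}
		hw := x << (y & 63)      // what SLL alone would compute: x
		fmt.Println(hw, hw&mask) // 4660 0
	}
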
// SRL only considers the bottom 6 bits of y. If y >= 64, the result should
// always be 0. See Lsh above for a detailed description.
-(Rsh8Ux8 <t> x y) -> (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh8Ux16 <t> x y) -> (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh8Ux32 <t> x y) -> (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh8Ux64 <t> x y) -> (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
-(Rsh16Ux8 <t> x y) -> (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh16Ux16 <t> x y) -> (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh16Ux32 <t> x y) -> (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh16Ux64 <t> x y) -> (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
-(Rsh32Ux8 <t> x y) -> (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh32Ux16 <t> x y) -> (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh32Ux32 <t> x y) -> (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh32Ux64 <t> x y) -> (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
-(Rsh64Ux8 <t> x y) -> (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
-(Rsh64Ux16 <t> x y) -> (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
-(Rsh64Ux32 <t> x y) -> (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
-(Rsh64Ux64 <t> x y) -> (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+(Rsh8Ux8 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh8Ux16 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh8Ux32 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh8Ux64 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Rsh16Ux8 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh16Ux16 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh16Ux32 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh16Ux64 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Rsh32Ux8 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh32Ux16 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh32Ux32 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh32Ux64 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Rsh64Ux8 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh64Ux16 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh64Ux32 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh64Ux64 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
// SRA only considers the bottom 6 bits of y. If y >= 64, the result should
// be either 0 or -1 based on the sign bit.
//
// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
// more than the 6 bits SRA cares about.
-(Rsh8x8 <t> x y) -> (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh8x16 <t> x y) -> (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh8x32 <t> x y) -> (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh8x64 <t> x y) -> (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh16x8 <t> x y) -> (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh16x16 <t> x y) -> (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh16x32 <t> x y) -> (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh16x64 <t> x y) -> (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh32x8 <t> x y) -> (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh32x16 <t> x y) -> (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh32x32 <t> x y) -> (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh32x64 <t> x y) -> (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
-(Rsh64x8 <t> x y) -> (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
-(Rsh64x16 <t> x y) -> (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
-(Rsh64x32 <t> x y) -> (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
-(Rsh64x64 <t> x y) -> (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh8x8 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh8x16 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh8x32 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh8x64 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh16x8 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh16x16 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh16x32 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh16x64 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh32x8 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh32x16 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh32x32 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh32x64 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh64x8 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh64x16 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh64x32 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh64x64 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
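
Here the shift amount is clamped rather than the result masked: OR-ing y with -1 when y >= 64 forces the low 6 bits to 63, so SRA fills with the sign bit as Go requires. A model in plain Go (illustrative, not part of this change):

	package main

	import "fmt"

	func main() {
		// Model of the lowering above: SLTIU [64] y gives 1 when y < 64;
		// ADDI [-1] turns that into 0 (y < 64) or -1 (y >= 64); OR-ing
		// with y leaves y alone or saturates the 6-bit amount to 63.
		x, y := int64(-8), uint64(100)
		lt := uint64(0)
		if y < 64 {
			lt = 1
		}
		amount := (y | (lt - 1)) & 63 // SRA uses only the low 6 bits
		fmt.Println(x >> amount)      // -1: sign fill, as Go requires
	}
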
// rotates
-(RotateLeft8 <t> x (MOVBconst [c])) -> (Or8 (Lsh8x64 <t> x (MOVBconst [c&7])) (Rsh8Ux64 <t> x (MOVBconst [-c&7])))
-(RotateLeft16 <t> x (MOVHconst [c])) -> (Or16 (Lsh16x64 <t> x (MOVHconst [c&15])) (Rsh16Ux64 <t> x (MOVHconst [-c&15])))
-(RotateLeft32 <t> x (MOVWconst [c])) -> (Or32 (Lsh32x64 <t> x (MOVWconst [c&31])) (Rsh32Ux64 <t> x (MOVWconst [-c&31])))
-(RotateLeft64 <t> x (MOVDconst [c])) -> (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
-
-(Less64 ...) -> (SLT ...)
-(Less32 x y) -> (SLT (SignExt32to64 x) (SignExt32to64 y))
-(Less16 x y) -> (SLT (SignExt16to64 x) (SignExt16to64 y))
-(Less8 x y) -> (SLT (SignExt8to64 x) (SignExt8to64 y))
-(Less64U ...) -> (SLTU ...)
-(Less32U x y) -> (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
-(Less16U x y) -> (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
-(Less8U x y) -> (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
-(Less64F ...) -> (FLTD ...)
-(Less32F ...) -> (FLTS ...)
+(RotateLeft8 <t> x (MOVBconst [c])) => (Or8 (Lsh8x64 <t> x (MOVBconst [c&7])) (Rsh8Ux64 <t> x (MOVBconst [-c&7])))
+(RotateLeft16 <t> x (MOVHconst [c])) => (Or16 (Lsh16x64 <t> x (MOVHconst [c&15])) (Rsh16Ux64 <t> x (MOVHconst [-c&15])))
+(RotateLeft32 <t> x (MOVWconst [c])) => (Or32 (Lsh32x64 <t> x (MOVWconst [c&31])) (Rsh32Ux64 <t> x (MOVWconst [-c&31])))
+(RotateLeft64 <t> x (MOVDconst [c])) => (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
+
+(Less64 ...) => (SLT ...)
+(Less32 x y) => (SLT (SignExt32to64 x) (SignExt32to64 y))
+(Less16 x y) => (SLT (SignExt16to64 x) (SignExt16to64 y))
+(Less8 x y) => (SLT (SignExt8to64 x) (SignExt8to64 y))
+(Less64U ...) => (SLTU ...)
+(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Less8U x y) => (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Less64F ...) => (FLTD ...)
+(Less32F ...) => (FLTS ...)
// Convert x <= y to !(y < x).
-(Leq64 x y) -> (Not (Less64 y x))
-(Leq32 x y) -> (Not (Less32 y x))
-(Leq16 x y) -> (Not (Less16 y x))
-(Leq8 x y) -> (Not (Less8 y x))
-(Leq64U x y) -> (Not (Less64U y x))
-(Leq32U x y) -> (Not (Less32U y x))
-(Leq16U x y) -> (Not (Less16U y x))
-(Leq8U x y) -> (Not (Less8U y x))
-(Leq64F ...) -> (FLED ...)
-(Leq32F ...) -> (FLES ...)
-
-(EqPtr x y) -> (SEQZ (SUB <x.Type> x y))
-(Eq64 x y) -> (SEQZ (SUB <x.Type> x y))
-(Eq32 x y) -> (SEQZ (SUBW <x.Type> x y))
-(Eq16 x y) -> (SEQZ (ZeroExt16to64 (SUB <x.Type> x y)))
-(Eq8 x y) -> (SEQZ (ZeroExt8to64 (SUB <x.Type> x y)))
-(Eq64F ...) -> (FEQD ...)
-(Eq32F ...) -> (FEQS ...)
-
-(NeqPtr x y) -> (SNEZ (SUB <x.Type> x y))
-(Neq64 x y) -> (SNEZ (SUB <x.Type> x y))
-(Neq32 x y) -> (SNEZ (SUBW <x.Type> x y))
-(Neq16 x y) -> (SNEZ (ZeroExt16to64 (SUB <x.Type> x y)))
-(Neq8 x y) -> (SNEZ (ZeroExt8to64 (SUB <x.Type> x y)))
-(Neq64F ...) -> (FNED ...)
-(Neq32F ...) -> (FNES ...)
+(Leq64 x y) => (Not (Less64 y x))
+(Leq32 x y) => (Not (Less32 y x))
+(Leq16 x y) => (Not (Less16 y x))
+(Leq8 x y) => (Not (Less8 y x))
+(Leq64U x y) => (Not (Less64U y x))
+(Leq32U x y) => (Not (Less32U y x))
+(Leq16U x y) => (Not (Less16U y x))
+(Leq8U x y) => (Not (Less8U y x))
+(Leq64F ...) => (FLED ...)
+(Leq32F ...) => (FLES ...)
+
+(EqPtr x y) => (SEQZ (SUB <x.Type> x y))
+(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
+(Eq32 x y) => (SEQZ (SUBW <x.Type> x y))
+(Eq16 x y) => (SEQZ (ZeroExt16to64 (SUB <x.Type> x y)))
+(Eq8 x y) => (SEQZ (ZeroExt8to64 (SUB <x.Type> x y)))
+(Eq64F ...) => (FEQD ...)
+(Eq32F ...) => (FEQS ...)
+
+(NeqPtr x y) => (SNEZ (SUB <x.Type> x y))
+(Neq64 x y) => (SNEZ (SUB <x.Type> x y))
+(Neq32 x y) => (SNEZ (SUBW <x.Type> x y))
+(Neq16 x y) => (SNEZ (ZeroExt16to64 (SUB <x.Type> x y)))
+(Neq8 x y) => (SNEZ (ZeroExt8to64 (SUB <x.Type> x y)))
+(Neq64F ...) => (FNED ...)
+(Neq32F ...) => (FNES ...)
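
The narrow equality rules zero-extend the difference before testing it: with 16-bit values held in 64-bit registers, the full-width difference may be nonzero in its high bits even when the low 16 bits match. In plain Go (illustrative, not part of this change):

	package main

	import "fmt"

	func main() {
		// Model of Eq16 above: truncates are copies on this target, so
		// registers can carry garbage above bit 15. Mask the difference
		// down to 16 bits (ZeroExt16to64) before the zero test (SEQZ).
		x, y := uint64(0x1_0005), uint64(0x2_0005) // equal as uint16
		diff := (x - y) << 48 >> 48                // ZeroExt16to64 (SUB x y)
		fmt.Println(diff == 0)                     // true: Eq16 yields 1
	}
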
// Loads
-(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
-(Load <t> ptr mem) && ( is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
-(Load <t> ptr mem) && ( is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
-(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
-(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
-(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) -> (MOVWload ptr mem)
-(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) -> (MOVWUload ptr mem)
-(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
-(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVWload ptr mem)
-(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && ( is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && ( is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVWload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
// Stores
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
// We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis
// knows what variables are being read/written by the ops.
-(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-
-(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
- (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
- (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
- (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
- (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-
-(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
- (MOVBUload [off1+off2] {sym} base mem)
-(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
- (MOVBload [off1+off2] {sym} base mem)
-(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
- (MOVHUload [off1+off2] {sym} base mem)
-(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
- (MOVHload [off1+off2] {sym} base mem)
-(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
- (MOVWUload [off1+off2] {sym} base mem)
-(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
- (MOVWload [off1+off2] {sym} base mem)
-(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
- (MOVDload [off1+off2] {sym} base mem)
-
-(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
- (MOVBstore [off1+off2] {sym} base val mem)
-(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
- (MOVHstore [off1+off2] {sym} base val mem)
-(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
- (MOVWstore [off1+off2] {sym} base val mem)
-(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
- (MOVDstore [off1+off2] {sym} base val mem)
-(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
-(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
-(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWstorezero [off1+off2] {sym} ptr mem)
-(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDstorezero [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
+(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
+(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
+(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVWUload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
+(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
+
+(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
+(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
+(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
+(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVDstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+
+(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVBUload [off1+int32(off2)] {sym} base mem)
+(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVBload [off1+int32(off2)] {sym} base mem)
+(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVHUload [off1+int32(off2)] {sym} base mem)
+(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVHload [off1+int32(off2)] {sym} base mem)
+(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVWUload [off1+int32(off2)] {sym} base mem)
+(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVWload [off1+int32(off2)] {sym} base mem)
+(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVDload [off1+int32(off2)] {sym} base mem)
+
+(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVBstore [off1+int32(off2)] {sym} base val mem)
+(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVHstore [off1+int32(off2)] {sym} base val mem)
+(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVWstore [off1+int32(off2)] {sym} base val mem)
+(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVDstore [off1+int32(off2)] {sym} base val mem)
+(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
// with OffPtr -> ADDI.
-(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+d) -> (MOVaddr [c+d] {s} x)
+(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
// Zeroing
// TODO: more optimized zeroing, including attempting to use aligned accesses.
-(Zero [0] _ mem) -> mem
-(Zero [1] ptr mem) -> (MOVBstore ptr (MOVBconst) mem)
-(Zero [2] ptr mem) -> (MOVHstore ptr (MOVHconst) mem)
-(Zero [4] ptr mem) -> (MOVWstore ptr (MOVWconst) mem)
-(Zero [8] ptr mem) -> (MOVDstore ptr (MOVDconst) mem)
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVBconst) mem)
+(Zero [2] ptr mem) => (MOVHstore ptr (MOVHconst) mem)
+(Zero [4] ptr mem) => (MOVWstore ptr (MOVWconst) mem)
+(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst) mem)
// Generic zeroing uses a loop
-(Zero [s] {t} ptr mem) ->
- (LoweredZero [t.(*types.Type).Alignment()]
+(Zero [s] {t} ptr mem) =>
+ (LoweredZero [t.Alignment()]
ptr
- (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.(*types.Type).Alignment(), config)]))
+ (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
mem)
-(Convert ...) -> (MOVconvert ...)
+(Convert ...) => (MOVconvert ...)
// Checks
-(IsNonNil p) -> (NeqPtr (MOVDconst) p)
-(IsInBounds ...) -> (Less64U ...)
-(IsSliceInBounds ...) -> (Leq64U ...)
+(IsNonNil p) => (NeqPtr (MOVDconst) p)
+(IsInBounds ...) => (Less64U ...)
+(IsSliceInBounds ...) => (Leq64U ...)
// Trivial lowering
-(NilCheck ...) -> (LoweredNilCheck ...)
-(GetClosurePtr ...) -> (LoweredGetClosurePtr ...)
-(GetCallerSP ...) -> (LoweredGetCallerSP ...)
-(GetCallerPC ...) -> (LoweredGetCallerPC ...)
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
// Write barrier.
-(WB ...) -> (LoweredWB ...)
+(WB ...) => (LoweredWB ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 -> (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
// Moves
// TODO: more optimized moves, including attempting to use aligned accesses.
-(Move [0] _ _ mem) -> mem
-(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
-(Move [2] dst src mem) -> (MOVHstore dst (MOVHload src mem) mem)
-(Move [4] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
-(Move [8] dst src mem) -> (MOVDstore dst (MOVDload src mem) mem)
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] dst src mem) => (MOVHstore dst (MOVHload src mem) mem)
+(Move [4] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
+(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
// Generic move uses a loop
-(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) ->
- (LoweredMove [t.(*types.Type).Alignment()]
+(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
+ (LoweredMove [t.Alignment()]
dst
src
- (ADDI <src.Type> [s-moveSize(t.(*types.Type).Alignment(), config)] src)
+ (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
mem)
// Boolean ops; 0=false, 1=true
-(AndB ...) -> (AND ...)
-(OrB ...) -> (OR ...)
-(EqB x y) -> (XORI [1] (XOR <typ.Bool> x y))
-(NeqB ...) -> (XOR ...)
-(Not x) -> (XORI [1] x)
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XORI [1] (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORI [1] x)
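
With booleans held in registers as 0 or 1, equality and negation reduce to XOR. In plain Go (illustrative, not part of this change):

	package main

	import "fmt"

	func main() {
		// EqB and Not as lowered above: for 0/1 values, x == y is
		// (x XOR y) XOR 1, and !x is x XOR 1.
		eqb := func(x, y uint8) uint8 { return x ^ y ^ 1 }
		not := func(x uint8) uint8 { return x ^ 1 }
		fmt.Println(eqb(1, 1), eqb(1, 0), not(0)) // 1 0 1
	}
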
// Lowering pointer arithmetic
// TODO: Special handling for SP offsets, like ARM
-(OffPtr [off] ptr:(SP)) -> (MOVaddr [off] ptr)
-(OffPtr [off] ptr) && is32Bit(off) -> (ADDI [off] ptr)
-(OffPtr [off] ptr) -> (ADD (MOVDconst [off]) ptr)
-
-(Const8 ...) -> (MOVBconst ...)
-(Const16 ...) -> (MOVHconst ...)
-(Const32 ...) -> (MOVWconst ...)
-(Const64 ...) -> (MOVDconst ...)
-(Const32F [val]) -> (FMVSX (MOVWconst [int64(int32(math.Float32bits(float32(math.Float64frombits(uint64(val))))))]))
-(Const64F [val]) -> (FMVDX (MOVDconst [val]))
-(ConstNil) -> (MOVDconst [0])
-(ConstBool ...) -> (MOVBconst ...)
+(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
+(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
+(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
+
+(Const8 ...) => (MOVBconst ...)
+(Const16 ...) => (MOVHconst ...)
+(Const32 ...) => (MOVWconst ...)
+(Const64 ...) => (MOVDconst ...)
+(Const32F [val]) => (FMVSX (MOVWconst [int32(math.Float32bits(val))]))
+(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
+(ConstNil) => (MOVDconst [0])
+(ConstBool [val]) => (MOVBconst [int8(b2i(val))])
// Convert 64 bit immediate to two 32 bit immediates, combine with add and shift.
// The lower 32 bit immediate will be treated as signed,
// so if it is negative, adjust for the borrow by incrementing the top half.
// We don't have to worry about overflow from the increment,
// because if the top half is all 1s, and int32(c) is negative,
// then the overall constant fits in an int32.
-(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) < 0 -> (ADD (SLLI <t> [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
-(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) >= 0 -> (ADD (SLLI <t> [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
+(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) < 0 => (ADD (SLLI <t> [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
+(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) >= 0 => (ADD (SLLI <t> [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
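
A standalone check of the split (illustrative Go, not part of this change):

	package main

	import "fmt"

	func main() {
		// MOVDconst split as above: the low half is sign-extended, so
		// increment the high half when int32(c) < 0 to repay the borrow.
		c := int64(0x12345678_9abcdef0)
		adj := int64(0)
		if int32(c) < 0 {
			adj = 1
		}
		rebuilt := (c>>32+adj)<<32 + int64(int32(c))
		fmt.Println(rebuilt == c) // true
	}
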
(Addr {sym} base) => (MOVaddr {sym} [0] base)
-(LocalAddr {sym} base _) -> (MOVaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVaddr {sym} base)
// Conditional branches
//
// cond is 1 if true; BNE branches when its operand is nonzero.
//
// TODO: RISCV branch instructions take two operands to compare,
// so we could generate more efficient code by computing the condition in the
// branch itself. This should be revisited now that the compiler has support
// for two control values (https://golang.org/cl/196557).
-(If cond yes no) -> (BNE cond yes no)
+(If cond yes no) => (BNE cond yes no)
// Calls
-(StaticCall ...) -> (CALLstatic ...)
-(ClosureCall ...) -> (CALLclosure ...)
-(InterCall ...) -> (CALLinter ...)
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
// Atomic Intrinsics
-(AtomicLoad8 ...) -> (LoweredAtomicLoad8 ...)
-(AtomicLoad32 ...) -> (LoweredAtomicLoad32 ...)
-(AtomicLoad64 ...) -> (LoweredAtomicLoad64 ...)
-(AtomicLoadPtr ...) -> (LoweredAtomicLoad64 ...)
+(AtomicLoad8 ...) => (LoweredAtomicLoad8 ...)
+(AtomicLoad32 ...) => (LoweredAtomicLoad32 ...)
+(AtomicLoad64 ...) => (LoweredAtomicLoad64 ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)
-(AtomicStore8 ...) -> (LoweredAtomicStore8 ...)
-(AtomicStore32 ...) -> (LoweredAtomicStore32 ...)
-(AtomicStore64 ...) -> (LoweredAtomicStore64 ...)
-(AtomicStorePtrNoWB ...) -> (LoweredAtomicStore64 ...)
+(AtomicStore8 ...) => (LoweredAtomicStore8 ...)
+(AtomicStore32 ...) => (LoweredAtomicStore32 ...)
+(AtomicStore64 ...) => (LoweredAtomicStore64 ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)
-(AtomicAdd32 ...) -> (LoweredAtomicAdd32 ...)
-(AtomicAdd64 ...) -> (LoweredAtomicAdd64 ...)
+(AtomicAdd32 ...) => (LoweredAtomicAdd32 ...)
+(AtomicAdd64 ...) => (LoweredAtomicAdd64 ...)
-(AtomicCompareAndSwap32 ...) -> (LoweredAtomicCas32 ...)
-(AtomicCompareAndSwap64 ...) -> (LoweredAtomicCas64 ...)
+(AtomicCompareAndSwap32 ...) => (LoweredAtomicCas32 ...)
+(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
-(AtomicExchange32 ...) -> (LoweredAtomicExchange32 ...)
-(AtomicExchange64 ...) -> (LoweredAtomicExchange64 ...)
+(AtomicExchange32 ...) => (LoweredAtomicExchange32 ...)
+(AtomicExchange64 ...) => (LoweredAtomicExchange64 ...)
// Optimizations
// Absorb SNEZ into branch.
-(BNE (SNEZ x) yes no) -> (BNE x yes no)
+(BNE (SNEZ x) yes no) => (BNE x yes no)
// Store zero
-(MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
-(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
-(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
-(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
// Fold constant into immediate instructions where possible.
-(ADD (MOVBconst [val]) x) && is32Bit(val) -> (ADDI [val] x)
-(ADD (MOVHconst [val]) x) && is32Bit(val) -> (ADDI [val] x)
-(ADD (MOVWconst [val]) x) && is32Bit(val) -> (ADDI [val] x)
-(ADD (MOVDconst [val]) x) && is32Bit(val) -> (ADDI [val] x)
-
-(AND (MOVBconst [val]) x) && is32Bit(val) -> (ANDI [val] x)
-(AND (MOVHconst [val]) x) && is32Bit(val) -> (ANDI [val] x)
-(AND (MOVWconst [val]) x) && is32Bit(val) -> (ANDI [val] x)
-(AND (MOVDconst [val]) x) && is32Bit(val) -> (ANDI [val] x)
-
-(OR (MOVBconst [val]) x) && is32Bit(val) -> (ORI [val] x)
-(OR (MOVHconst [val]) x) && is32Bit(val) -> (ORI [val] x)
-(OR (MOVWconst [val]) x) && is32Bit(val) -> (ORI [val] x)
-(OR (MOVDconst [val]) x) && is32Bit(val) -> (ORI [val] x)
-
-(XOR (MOVBconst [val]) x) && is32Bit(val) -> (XORI [val] x)
-(XOR (MOVHconst [val]) x) && is32Bit(val) -> (XORI [val] x)
-(XOR (MOVWconst [val]) x) && is32Bit(val) -> (XORI [val] x)
-(XOR (MOVDconst [val]) x) && is32Bit(val) -> (XORI [val] x)
-
-(SLL x (MOVBconst [val])) -> (SLLI [val&63] x)
-(SLL x (MOVHconst [val])) -> (SLLI [val&63] x)
-(SLL x (MOVWconst [val])) -> (SLLI [val&63] x)
-(SLL x (MOVDconst [val])) -> (SLLI [val&63] x)
-
-(SRL x (MOVBconst [val])) -> (SRLI [val&63] x)
-(SRL x (MOVHconst [val])) -> (SRLI [val&63] x)
-(SRL x (MOVWconst [val])) -> (SRLI [val&63] x)
-(SRL x (MOVDconst [val])) -> (SRLI [val&63] x)
-
-(SRA x (MOVBconst [val])) -> (SRAI [val&63] x)
-(SRA x (MOVHconst [val])) -> (SRAI [val&63] x)
-(SRA x (MOVWconst [val])) -> (SRAI [val&63] x)
-(SRA x (MOVDconst [val])) -> (SRAI [val&63] x)
+(ADD (MOVBconst [val]) x) => (ADDI [int64(val)] x)
+(ADD (MOVHconst [val]) x) => (ADDI [int64(val)] x)
+(ADD (MOVWconst [val]) x) => (ADDI [int64(val)] x)
+(ADD (MOVDconst [val]) x) && is32Bit(val) => (ADDI [val] x)
+
+(AND (MOVBconst [val]) x) => (ANDI [int64(val)] x)
+(AND (MOVHconst [val]) x) => (ANDI [int64(val)] x)
+(AND (MOVWconst [val]) x) => (ANDI [int64(val)] x)
+(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
+
+(OR (MOVBconst [val]) x) => (ORI [int64(val)] x)
+(OR (MOVHconst [val]) x) => (ORI [int64(val)] x)
+(OR (MOVWconst [val]) x) => (ORI [int64(val)] x)
+(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
+
+(XOR (MOVBconst [val]) x) => (XORI [int64(val)] x)
+(XOR (MOVHconst [val]) x) => (XORI [int64(val)] x)
+(XOR (MOVWconst [val]) x) => (XORI [int64(val)] x)
+(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
+
+(SLL x (MOVBconst [val])) => (SLLI [int64(val&63)] x)
+(SLL x (MOVHconst [val])) => (SLLI [int64(val&63)] x)
+(SLL x (MOVWconst [val])) => (SLLI [int64(val&63)] x)
+(SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
+
+(SRL x (MOVBconst [val])) => (SRLI [int64(val&63)] x)
+(SRL x (MOVHconst [val])) => (SRLI [int64(val&63)] x)
+(SRL x (MOVWconst [val])) => (SRLI [int64(val&63)] x)
+(SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
+
+(SRA x (MOVBconst [val])) => (SRAI [int64(val&63)] x)
+(SRA x (MOVHconst [val])) => (SRAI [int64(val&63)] x)
+(SRA x (MOVWconst [val])) => (SRAI [int64(val&63)] x)
+(SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
// Convert subtraction of a const into ADDI with negative immediate, where possible.
-(SUB x (MOVBconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
-(SUB x (MOVHconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
-(SUB x (MOVWconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
-(SUB x (MOVDconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
+(SUB x (MOVBconst [val])) => (ADDI [-int64(val)] x)
+(SUB x (MOVHconst [val])) => (ADDI [-int64(val)] x)
+(SUB x (MOVWconst [val])) && is32Bit(-int64(val)) => (ADDI [-int64(val)] x)
+(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
// Subtraction of zero.
-(SUB x (MOVBconst [0])) -> x
-(SUB x (MOVHconst [0])) -> x
-(SUB x (MOVWconst [0])) -> x
-(SUB x (MOVDconst [0])) -> x
+(SUB x (MOVBconst [0])) => x
+(SUB x (MOVHconst [0])) => x
+(SUB x (MOVWconst [0])) => x
+(SUB x (MOVDconst [0])) => x
// Subtraction of zero with sign extension.
-(SUBW x (MOVWconst [0])) -> (ADDIW [0] x)
+(SUBW x (MOVWconst [0])) => (ADDIW [0] x)
// Subtraction from zero.
-(SUB (MOVBconst [0]) x) -> (NEG x)
-(SUB (MOVHconst [0]) x) -> (NEG x)
-(SUB (MOVWconst [0]) x) -> (NEG x)
-(SUB (MOVDconst [0]) x) -> (NEG x)
+(SUB (MOVBconst [0]) x) => (NEG x)
+(SUB (MOVHconst [0]) x) => (NEG x)
+(SUB (MOVWconst [0]) x) => (NEG x)
+(SUB (MOVDconst [0]) x) => (NEG x)
// Subtraction from zero with sign extension.
-(SUBW (MOVDconst [0]) x) -> (NEGW x)
+(SUBW (MOVDconst [0]) x) => (NEGW x)
// Addition of zero.
-(ADDI [0] x) -> x
+(ADDI [0] x) => x
package ssa
import "math"
-import "cmd/compile/internal/types"
func rewriteValueRISCV64(v *Value) bool {
switch v.Op {
v.Op = OpRISCV64MOVBconst
return true
case OpConstBool:
- v.Op = OpRISCV64MOVBconst
- return true
+ return rewriteValueRISCV64_OpConstBool(v)
case OpConstNil:
return rewriteValueRISCV64_OpConstNil(v)
case OpConvert:
case OpDiv16u:
return rewriteValueRISCV64_OpDiv16u(v)
case OpDiv32:
- v.Op = OpRISCV64DIVW
- return true
+ return rewriteValueRISCV64_OpDiv32(v)
case OpDiv32F:
v.Op = OpRISCV64FDIVS
return true
v.Op = OpRISCV64DIVUW
return true
case OpDiv64:
- v.Op = OpRISCV64DIV
- return true
+ return rewriteValueRISCV64_OpDiv64(v)
case OpDiv64F:
v.Op = OpRISCV64FDIVD
return true
case OpMod16u:
return rewriteValueRISCV64_OpMod16u(v)
case OpMod32:
- v.Op = OpRISCV64REMW
- return true
+ return rewriteValueRISCV64_OpMod32(v)
case OpMod32u:
v.Op = OpRISCV64REMUW
return true
case OpMod64:
- v.Op = OpRISCV64REM
- return true
+ return rewriteValueRISCV64_OpMod64(v)
case OpMod64u:
v.Op = OpRISCV64REMU
return true
v.reset(OpRISCV64ADD)
v0 := b.NewValue0(v.Pos, OpRISCV64ADD, t)
v1 := b.NewValue0(v.Pos, OpRISCV64SRLI, t)
- v1.AuxInt = 1
+ v1.AuxInt = int64ToAuxInt(1)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpRISCV64SRLI, t)
- v2.AuxInt = 1
+ v2.AuxInt = int64ToAuxInt(1)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpRISCV64ANDI, t)
- v3.AuxInt = 1
+ v3.AuxInt = int64ToAuxInt(1)
v4 := b.NewValue0(v.Pos, OpRISCV64AND, t)
v4.AddArg2(x, y)
v3.AddArg(v4)
b := v.Block
typ := &b.Func.Config.Types
// match: (Const32F [val])
- // result: (FMVSX (MOVWconst [int64(int32(math.Float32bits(float32(math.Float64frombits(uint64(val))))))]))
+ // result: (FMVSX (MOVWconst [int32(math.Float32bits(val))]))
for {
- val := v.AuxInt
+ val := auxIntToFloat32(v.AuxInt)
v.reset(OpRISCV64FMVSX)
v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
- v0.AuxInt = int64(int32(math.Float32bits(float32(math.Float64frombits(uint64(val))))))
+ v0.AuxInt = int32ToAuxInt(int32(math.Float32bits(val)))
v.AddArg(v0)
return true
}
b := v.Block
typ := &b.Func.Config.Types
// match: (Const64F [val])
- // result: (FMVDX (MOVDconst [val]))
+ // result: (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
for {
- val := v.AuxInt
+ val := auxIntToFloat64(v.AuxInt)
v.reset(OpRISCV64FMVDX)
v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
- v0.AuxInt = val
+ v0.AuxInt = int64ToAuxInt(int64(math.Float64bits(val)))
v.AddArg(v0)
return true
}
}
+func rewriteValueRISCV64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [val])
+ // result: (MOVBconst [int8(b2i(val))])
+ for {
+ val := auxIntToBool(v.AuxInt)
+ v.reset(OpRISCV64MOVBconst)
+ v.AuxInt = int8ToAuxInt(int8(b2i(val)))
+ return true
+ }
+}
func rewriteValueRISCV64_OpConstNil(v *Value) bool {
// match: (ConstNil)
// result: (MOVDconst [0])
for {
v.reset(OpRISCV64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
}
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (Div16 x y)
+ // match: (Div16 x y [false])
// result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
x := v_0
y := v_1
v.reset(OpRISCV64DIVW)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueRISCV64_OpDiv16u(v *Value) bool {
v_1 := v.Args[1]
return true
}
}
+func rewriteValueRISCV64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div32 x y [false])
+ // result: (DIVW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 x y [false])
+ // result: (DIV x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIV)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpDiv8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
x := v_0
y := v_1
v.reset(OpRISCV64XORI)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpRISCV64XOR, typ.Bool)
v0.AddArg2(x, y)
v.AddArg(v0)
x := v_0
y := v_1
v.reset(OpRISCV64SRAI)
- v.AuxInt = 32
+ v.AuxInt = int64ToAuxInt(32)
v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64)
v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v1.AddArg(x)
x := v_0
y := v_1
v.reset(OpRISCV64SRLI)
- v.AuxInt = 32
+ v.AuxInt = int64ToAuxInt(32)
v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(x)
// match: (LocalAddr {sym} base _)
// result: (MOVaddr {sym} base)
for {
- sym := v.Aux
+ sym := auxToSym(v.Aux)
base := v_0
v.reset(OpRISCV64MOVaddr)
- v.Aux = sym
+ v.Aux = symToAux(sym)
v.AddArg(base)
return true
}
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg16, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg16, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg16, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg16, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg32, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg32, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg32, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg32, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg64, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg64, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg64, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg64, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg8, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg8, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg8, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg8, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (Mod16 x y)
+ // match: (Mod16 x y [false])
// result: (REMW (SignExt16to32 x) (SignExt16to32 y))
for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
x := v_0
y := v_1
v.reset(OpRISCV64REMW)
v.AddArg2(v0, v1)
return true
}
+ return false
}
func rewriteValueRISCV64_OpMod16u(v *Value) bool {
v_1 := v.Args[1]
return true
}
}
+func rewriteValueRISCV64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod32 x y [false])
+ // result: (REMW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 x y [false])
+ // result: (REM x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REM)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpMod8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (Move [0] _ _ mem)
// result: mem
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
mem := v_2
// match: (Move [1] dst src mem)
// result: (MOVBstore dst (MOVBload src mem) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt64(v.AuxInt) != 1 {
break
}
dst := v_0
// match: (Move [2] dst src mem)
// result: (MOVHstore dst (MOVHload src mem) mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
dst := v_0
// match: (Move [4] dst src mem)
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
dst := v_0
// match: (Move [8] dst src mem)
// result: (MOVDstore dst (MOVDload src mem) mem)
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
dst := v_0
}
// match: (Move [s] {t} dst src mem)
// cond: (s <= 16 || logLargeCopy(v, s))
- // result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDI <src.Type> [s-moveSize(t.(*types.Type).Alignment(), config)] src) mem)
+ // result: (LoweredMove [t.Alignment()] dst src (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src) mem)
for {
- s := v.AuxInt
- t := v.Aux
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
break
}
v.reset(OpRISCV64LoweredMove)
- v.AuxInt = t.(*types.Type).Alignment()
+ v.AuxInt = int64ToAuxInt(t.Alignment())
v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, src.Type)
- v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
v0.AddArg(src)
v.AddArg4(dst, src, v0, mem)
return true
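
// s-moveSize(t.Alignment(), config) is the offset of the last aligned
// chunk, which LoweredMove uses to compute its end pointer. A sketch
// of what moveSize computes (the real helper takes *Config and reads
// its pointer size):
func moveSize(align, ptrSize int64) int64 {
	switch {
	case align%8 == 0 && ptrSize == 8:
		return 8
	case align%4 == 0:
		return 4
	case align%2 == 0:
		return 2
	}
	return 1
}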
for {
x := v_0
v.reset(OpRISCV64XORI)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v.AddArg(x)
return true
}
b := v.Block
typ := &b.Func.Config.Types
// match: (OffPtr [off] ptr:(SP))
- // result: (MOVaddr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (MOVaddr [int32(off)] ptr)
for {
- off := v.AuxInt
+ off := auxIntToInt64(v.AuxInt)
ptr := v_0
- if ptr.Op != OpSP {
+ if ptr.Op != OpSP || !(is32Bit(off)) {
break
}
v.reset(OpRISCV64MOVaddr)
- v.AuxInt = off
+ v.AuxInt = int32ToAuxInt(int32(off))
v.AddArg(ptr)
return true
}
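
// MOVaddr's offset is now an int32 while OffPtr's stays int64, so the
// rule gains an explicit range check before the int32(off) truncation.
// is32Bit reports whether the value survives the round trip, as in
// rewrite.go:
func is32Bit(n int64) bool { return n == int64(int32(n)) }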
// cond: is32Bit(off)
// result: (ADDI [off] ptr)
for {
- off := v.AuxInt
+ off := auxIntToInt64(v.AuxInt)
ptr := v_0
if !(is32Bit(off)) {
break
}
v.reset(OpRISCV64ADDI)
- v.AuxInt = off
+ v.AuxInt = int64ToAuxInt(off)
v.AddArg(ptr)
return true
}
// match: (OffPtr [off] ptr)
// result: (ADD (MOVDconst [off]) ptr)
for {
- off := v.AuxInt
+ off := auxIntToInt64(v.AuxInt)
ptr := v_0
v.reset(OpRISCV64ADD)
v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
- v0.AuxInt = off
+ v0.AuxInt = int64ToAuxInt(off)
v.AddArg2(v0, ptr)
return true
}
// cond: boundsABI(kind) == 0
// result: (LoweredPanicBoundsA [kind] x y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
mem := v_2
break
}
v.reset(OpRISCV64LoweredPanicBoundsA)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg3(x, y, mem)
return true
}
// cond: boundsABI(kind) == 1
// result: (LoweredPanicBoundsB [kind] x y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
mem := v_2
break
}
v.reset(OpRISCV64LoweredPanicBoundsB)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg3(x, y, mem)
return true
}
// cond: boundsABI(kind) == 2
// result: (LoweredPanicBoundsC [kind] x y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
mem := v_2
break
}
v.reset(OpRISCV64LoweredPanicBoundsC)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg3(x, y, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ADD (MOVBconst [val]) x)
- // cond: is32Bit(val)
- // result: (ADDI [val] x)
+ // result: (ADDI [int64(val)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVBconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt8(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
- continue
- }
v.reset(OpRISCV64ADDI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(int64(val))
v.AddArg(x)
return true
}
break
}
// match: (ADD (MOVHconst [val]) x)
- // cond: is32Bit(val)
- // result: (ADDI [val] x)
+ // result: (ADDI [int64(val)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVHconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt16(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
- continue
- }
v.reset(OpRISCV64ADDI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(int64(val))
v.AddArg(x)
return true
}
break
}
// match: (ADD (MOVWconst [val]) x)
- // cond: is32Bit(val)
- // result: (ADDI [val] x)
+ // result: (ADDI [int64(val)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVWconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt32(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
- continue
- }
v.reset(OpRISCV64ADDI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(int64(val))
v.AddArg(x)
return true
}
if v_0.Op != OpRISCV64MOVDconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt64(v_0.AuxInt)
x := v_1
if !(is32Bit(val)) {
continue
}
v.reset(OpRISCV64ADDI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(val)
v.AddArg(x)
return true
}
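
// The is32Bit(val) conditions vanish from the MOVBconst/MOVHconst/
// MOVWconst forms because val is now typed int8/int16/int32: after the
// int64(val) widening the check is vacuously true, so only the
// MOVDconst form keeps it.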
func rewriteValueRISCV64_OpRISCV64ADDI(v *Value) bool {
v_0 := v.Args[0]
// match: (ADDI [c] (MOVaddr [d] {s} x))
- // cond: is32Bit(c+d)
- // result: (MOVaddr [c+d] {s} x)
+ // cond: is32Bit(c+int64(d))
+ // result: (MOVaddr [int32(c)+d] {s} x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- d := v_0.AuxInt
- s := v_0.Aux
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
x := v_0.Args[0]
- if !(is32Bit(c + d)) {
+ if !(is32Bit(c + int64(d))) {
break
}
v.reset(OpRISCV64MOVaddr)
- v.AuxInt = c + d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(int32(c) + d)
+ v.Aux = symToAux(s)
v.AddArg(x)
return true
}
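
// Here c (ADDI aux, int64) and d (MOVaddr aux, int32) have different
// widths, so the overflow check runs in 64 bits while the result
// re-truncates: is32Bit(c+int64(d)) guarantees int32(c)+d cannot wrap.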
// match: (ADDI [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
x := v_0
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (AND (MOVBconst [val]) x)
- // cond: is32Bit(val)
- // result: (ANDI [val] x)
+ // result: (ANDI [int64(val)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVBconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt8(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
- continue
- }
v.reset(OpRISCV64ANDI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(int64(val))
v.AddArg(x)
return true
}
break
}
// match: (AND (MOVHconst [val]) x)
- // cond: is32Bit(val)
- // result: (ANDI [val] x)
+ // result: (ANDI [int64(val)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVHconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt16(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
- continue
- }
v.reset(OpRISCV64ANDI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(int64(val))
v.AddArg(x)
return true
}
break
}
// match: (AND (MOVWconst [val]) x)
- // cond: is32Bit(val)
- // result: (ANDI [val] x)
+ // result: (ANDI [int64(val)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVWconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt32(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
- continue
- }
v.reset(OpRISCV64ANDI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(int64(val))
v.AddArg(x)
return true
}
if v_0.Op != OpRISCV64MOVDconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt64(v_0.AuxInt)
x := v_1
if !(is32Bit(val)) {
continue
}
v.reset(OpRISCV64ANDI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(val)
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpRISCV64MOVBUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVBUload [off1] {sym} (ADDI [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVBUload [off1+off2] {sym} base mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBUload [off1+int32(off2)] {sym} base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVBUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(base, mem)
return true
}
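
// canMergeSym/mergeSymTyped combine symbol+offset addressing only when
// at most one symbol is present; roughly (a sketch, where Sym is the
// ssa package's symbol interface):
func canMergeSym(x, y interface{}) bool { return x == nil || y == nil }

func mergeSymTyped(x, y Sym) Sym {
	if x == nil {
		return y
	}
	if y == nil {
		return x
	}
	panic("mergeSym with two non-nil syms")
}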
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpRISCV64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVBload [off1] {sym} (ADDI [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVBload [off1+off2] {sym} base mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBload [off1+int32(off2)] {sym} base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(base, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpRISCV64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
// match: (MOVBstore [off1] {sym} (ADDI [off2] base) val mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVBstore [off1+off2] {sym} base val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBstore [off1+int32(off2)] {sym} base val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg3(base, val, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBconst [0]) mem)
// result: (MOVBstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpRISCV64MOVBconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpRISCV64MOVBconst || auxIntToInt8(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpRISCV64MOVBstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
- // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
break
}
v.reset(OpRISCV64MOVBstorezero)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVBstorezero [off1+off2] {sym} ptr mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVBstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// result: (ADD (SLLI <t> [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if !(!is32Bit(c) && int32(c) < 0) {
break
}
v.reset(OpRISCV64ADD)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 32
+ v0.AuxInt = int64ToAuxInt(32)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
- v1.AuxInt = c>>32 + 1
+ v1.AuxInt = int64ToAuxInt(c>>32 + 1)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
- v2.AuxInt = int64(int32(c))
+ v2.AuxInt = int64ToAuxInt(int64(int32(c)))
v.AddArg2(v0, v2)
return true
}
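
// The MOVDconst split materializes a 64-bit c as
//   ((c>>32 + adj) << 32) + int64(int32(c))
// where adj is 1 when int32(c) < 0: the low word is added with sign
// extension, contributing int64(int32(c)) = (c & 0xffffffff) - 1<<32,
// and the +1 in the high word compensates. For example, with
// c = 0x1_8000_0001: int32(c) = -0x7fffffff, c>>32+1 = 2, and
// (2 << 32) - 0x7fffffff = 0x1_8000_0001.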
// result: (ADD (SLLI <t> [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if !(!is32Bit(c) && int32(c) >= 0) {
break
}
v.reset(OpRISCV64ADD)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 32
+ v0.AuxInt = int64ToAuxInt(32)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
- v1.AuxInt = c>>32 + 0
+ v1.AuxInt = int64ToAuxInt(c>>32 + 0)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
- v2.AuxInt = int64(int32(c))
+ v2.AuxInt = int64ToAuxInt(int64(int32(c)))
v.AddArg2(v0, v2)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpRISCV64MOVDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVDload [off1] {sym} (ADDI [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVDload [off1+off2] {sym} base mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDload [off1+int32(off2)] {sym} base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(base, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpRISCV64MOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
// match: (MOVDstore [off1] {sym} (ADDI [off2] base) val mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVDstore [off1+off2] {sym} base val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDstore [off1+int32(off2)] {sym} base val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg3(base, val, mem)
return true
}
// match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
// result: (MOVDstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpRISCV64MOVDconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpRISCV64MOVDstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
- // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVDstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
break
}
v.reset(OpRISCV64MOVDstorezero)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVDstorezero [off1+off2] {sym} ptr mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVDstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpRISCV64MOVHUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVHUload [off1] {sym} (ADDI [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVHUload [off1+off2] {sym} base mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHUload [off1+int32(off2)] {sym} base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVHUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(base, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpRISCV64MOVHload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVHload [off1] {sym} (ADDI [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVHload [off1+off2] {sym} base mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHload [off1+int32(off2)] {sym} base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVHload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(base, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpRISCV64MOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
// match: (MOVHstore [off1] {sym} (ADDI [off2] base) val mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVHstore [off1+off2] {sym} base val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHstore [off1+int32(off2)] {sym} base val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg3(base, val, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVHconst [0]) mem)
// result: (MOVHstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpRISCV64MOVHconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpRISCV64MOVHconst || auxIntToInt16(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpRISCV64MOVHstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
- // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
break
}
v.reset(OpRISCV64MOVHstorezero)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVHstorezero [off1+off2] {sym} ptr mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVHstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWUload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpRISCV64MOVWUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVWUload [off1] {sym} (ADDI [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVWUload [off1+off2] {sym} base mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWUload [off1+int32(off2)] {sym} base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVWUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(base, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpRISCV64MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVWload [off1] {sym} (ADDI [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVWload [off1+off2] {sym} base mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWload [off1+int32(off2)] {sym} base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(base, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpRISCV64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
// match: (MOVWstore [off1] {sym} (ADDI [off2] base) val mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVWstore [off1+off2] {sym} base val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWstore [off1+int32(off2)] {sym} base val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg3(base, val, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem)
// result: (MOVWstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpRISCV64MOVWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpRISCV64MOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpRISCV64MOVWstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
- // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpRISCV64MOVaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
break
}
v.reset(OpRISCV64MOVWstorezero)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVWstorezero [off1+off2] {sym} ptr mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpRISCV64ADDI {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + off2)) {
break
}
v.reset(OpRISCV64MOVWstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (OR (MOVBconst [val]) x)
- // cond: is32Bit(val)
- // result: (ORI [val] x)
+ // result: (ORI [int64(val)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVBconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt8(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
- continue
- }
v.reset(OpRISCV64ORI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(int64(val))
v.AddArg(x)
return true
}
break
}
// match: (OR (MOVHconst [val]) x)
- // cond: is32Bit(val)
- // result: (ORI [val] x)
+ // result: (ORI [int64(val)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVHconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt16(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
- continue
- }
v.reset(OpRISCV64ORI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(int64(val))
v.AddArg(x)
return true
}
break
}
// match: (OR (MOVWconst [val]) x)
- // cond: is32Bit(val)
- // result: (ORI [val] x)
+ // result: (ORI [int64(val)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVWconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt32(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
- continue
- }
v.reset(OpRISCV64ORI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(int64(val))
v.AddArg(x)
return true
}
if v_0.Op != OpRISCV64MOVDconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt64(v_0.AuxInt)
x := v_1
if !(is32Bit(val)) {
continue
}
v.reset(OpRISCV64ORI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(val)
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SLL x (MOVBconst [val]))
- // result: (SLLI [val&63] x)
+ // result: (SLLI [int64(val&63)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVBconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt8(v_1.AuxInt)
v.reset(OpRISCV64SLLI)
- v.AuxInt = val & 63
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
v.AddArg(x)
return true
}
// match: (SLL x (MOVHconst [val]))
- // result: (SLLI [val&63] x)
+ // result: (SLLI [int64(val&63)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVHconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt16(v_1.AuxInt)
v.reset(OpRISCV64SLLI)
- v.AuxInt = val & 63
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
v.AddArg(x)
return true
}
// match: (SLL x (MOVWconst [val]))
- // result: (SLLI [val&63] x)
+ // result: (SLLI [int64(val&63)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVWconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt32(v_1.AuxInt)
v.reset(OpRISCV64SLLI)
- v.AuxInt = val & 63
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
v.AddArg(x)
return true
}
// match: (SLL x (MOVDconst [val]))
- // result: (SLLI [val&63] x)
+ // result: (SLLI [int64(val&63)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVDconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt64(v_1.AuxInt)
v.reset(OpRISCV64SLLI)
- v.AuxInt = val & 63
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
v.AddArg(x)
return true
}
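
// The val&63 masking mirrors RV64 shift semantics: SLLI/SRLI/SRAI
// encode a 6-bit shift amount, and the register forms likewise read
// only the low 6 bits, so masking the constant preserves behavior.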
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SRA x (MOVBconst [val]))
- // result: (SRAI [val&63] x)
+ // result: (SRAI [int64(val&63)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVBconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt8(v_1.AuxInt)
v.reset(OpRISCV64SRAI)
- v.AuxInt = val & 63
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
v.AddArg(x)
return true
}
// match: (SRA x (MOVHconst [val]))
- // result: (SRAI [val&63] x)
+ // result: (SRAI [int64(val&63)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVHconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt16(v_1.AuxInt)
v.reset(OpRISCV64SRAI)
- v.AuxInt = val & 63
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
v.AddArg(x)
return true
}
// match: (SRA x (MOVWconst [val]))
- // result: (SRAI [val&63] x)
+ // result: (SRAI [int64(val&63)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVWconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt32(v_1.AuxInt)
v.reset(OpRISCV64SRAI)
- v.AuxInt = val & 63
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
v.AddArg(x)
return true
}
// match: (SRA x (MOVDconst [val]))
- // result: (SRAI [val&63] x)
+ // result: (SRAI [int64(val&63)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVDconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt64(v_1.AuxInt)
v.reset(OpRISCV64SRAI)
- v.AuxInt = val & 63
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SRL x (MOVBconst [val]))
- // result: (SRLI [val&63] x)
+ // result: (SRLI [int64(val&63)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVBconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt8(v_1.AuxInt)
v.reset(OpRISCV64SRLI)
- v.AuxInt = val & 63
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
v.AddArg(x)
return true
}
// match: (SRL x (MOVHconst [val]))
- // result: (SRLI [val&63] x)
+ // result: (SRLI [int64(val&63)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVHconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt16(v_1.AuxInt)
v.reset(OpRISCV64SRLI)
- v.AuxInt = val & 63
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
v.AddArg(x)
return true
}
// match: (SRL x (MOVWconst [val]))
- // result: (SRLI [val&63] x)
+ // result: (SRLI [int64(val&63)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVWconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt32(v_1.AuxInt)
v.reset(OpRISCV64SRLI)
- v.AuxInt = val & 63
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
v.AddArg(x)
return true
}
// match: (SRL x (MOVDconst [val]))
- // result: (SRLI [val&63] x)
+ // result: (SRLI [int64(val&63)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVDconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt64(v_1.AuxInt)
v.reset(OpRISCV64SRLI)
- v.AuxInt = val & 63
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SUB x (MOVBconst [val]))
- // cond: is32Bit(-val)
- // result: (ADDI [-val] x)
+ // result: (ADDI [-int64(val)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVBconst {
break
}
- val := v_1.AuxInt
- if !(is32Bit(-val)) {
- break
- }
+ val := auxIntToInt8(v_1.AuxInt)
v.reset(OpRISCV64ADDI)
- v.AuxInt = -val
+ v.AuxInt = int64ToAuxInt(-int64(val))
v.AddArg(x)
return true
}
// match: (SUB x (MOVHconst [val]))
- // cond: is32Bit(-val)
- // result: (ADDI [-val] x)
+ // result: (ADDI [-int64(val)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVHconst {
break
}
- val := v_1.AuxInt
- if !(is32Bit(-val)) {
- break
- }
+ val := auxIntToInt16(v_1.AuxInt)
v.reset(OpRISCV64ADDI)
- v.AuxInt = -val
+ v.AuxInt = int64ToAuxInt(-int64(val))
v.AddArg(x)
return true
}
// match: (SUB x (MOVWconst [val]))
- // cond: is32Bit(-val)
- // result: (ADDI [-val] x)
+ // cond: is32Bit(-int64(val))
+ // result: (ADDI [-int64(val)] x)
for {
x := v_0
if v_1.Op != OpRISCV64MOVWconst {
break
}
- val := v_1.AuxInt
- if !(is32Bit(-val)) {
+ val := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(-int64(val))) {
break
}
v.reset(OpRISCV64ADDI)
- v.AuxInt = -val
+ v.AuxInt = int64ToAuxInt(-int64(val))
v.AddArg(x)
return true
}
if v_1.Op != OpRISCV64MOVDconst {
break
}
- val := v_1.AuxInt
+ val := auxIntToInt64(v_1.AuxInt)
if !(is32Bit(-val)) {
break
}
v.reset(OpRISCV64ADDI)
- v.AuxInt = -val
+ v.AuxInt = int64ToAuxInt(-val)
v.AddArg(x)
return true
}
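
// The negated-constant forms keep is32Bit only where it can fail:
// -int64(val) always fits in 32 bits for int8/int16 val, but not for
// int32 or int64 (val = math.MinInt32 gives -int64(val) == 1<<31,
// which is32Bit rejects), so only the MOVWconst and MOVDconst cases
// retain the check.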
// result: x
for {
x := v_0
- if v_1.Op != OpRISCV64MOVBconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpRISCV64MOVBconst || auxIntToInt8(v_1.AuxInt) != 0 {
break
}
v.copyOf(x)
// result: x
for {
x := v_0
- if v_1.Op != OpRISCV64MOVHconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpRISCV64MOVHconst || auxIntToInt16(v_1.AuxInt) != 0 {
break
}
v.copyOf(x)
// result: x
for {
x := v_0
- if v_1.Op != OpRISCV64MOVWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpRISCV64MOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
v.copyOf(x)
// result: x
for {
x := v_0
- if v_1.Op != OpRISCV64MOVDconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
v.copyOf(x)
// match: (SUB (MOVBconst [0]) x)
// result: (NEG x)
for {
- if v_0.Op != OpRISCV64MOVBconst || v_0.AuxInt != 0 {
+ if v_0.Op != OpRISCV64MOVBconst || auxIntToInt8(v_0.AuxInt) != 0 {
break
}
x := v_1
// match: (SUB (MOVHconst [0]) x)
// result: (NEG x)
for {
- if v_0.Op != OpRISCV64MOVHconst || v_0.AuxInt != 0 {
+ if v_0.Op != OpRISCV64MOVHconst || auxIntToInt16(v_0.AuxInt) != 0 {
break
}
x := v_1
// match: (SUB (MOVWconst [0]) x)
// result: (NEG x)
for {
- if v_0.Op != OpRISCV64MOVWconst || v_0.AuxInt != 0 {
+ if v_0.Op != OpRISCV64MOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
break
}
x := v_1
// match: (SUB (MOVDconst [0]) x)
// result: (NEG x)
for {
- if v_0.Op != OpRISCV64MOVDconst || v_0.AuxInt != 0 {
+ if v_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 {
break
}
x := v_1
// result: (ADDIW [0] x)
for {
x := v_0
- if v_1.Op != OpRISCV64MOVWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpRISCV64MOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
v.reset(OpRISCV64ADDIW)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
v.AddArg(x)
return true
}
// match: (SUBW (MOVDconst [0]) x)
// result: (NEGW x)
for {
- if v_0.Op != OpRISCV64MOVDconst || v_0.AuxInt != 0 {
+ if v_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 {
break
}
x := v_1
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (XOR (MOVBconst [val]) x)
- // cond: is32Bit(val)
- // result: (XORI [val] x)
+ // result: (XORI [int64(val)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVBconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt8(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
- continue
- }
v.reset(OpRISCV64XORI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(int64(val))
v.AddArg(x)
return true
}
break
}
// match: (XOR (MOVHconst [val]) x)
- // cond: is32Bit(val)
- // result: (XORI [val] x)
+ // result: (XORI [int64(val)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVHconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt16(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
- continue
- }
v.reset(OpRISCV64XORI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(int64(val))
v.AddArg(x)
return true
}
break
}
// match: (XOR (MOVWconst [val]) x)
- // cond: is32Bit(val)
- // result: (XORI [val] x)
+ // result: (XORI [int64(val)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVWconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt32(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
- continue
- }
v.reset(OpRISCV64XORI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(int64(val))
v.AddArg(x)
return true
}
if v_0.Op != OpRISCV64MOVDconst {
continue
}
- val := v_0.AuxInt
+ val := auxIntToInt64(v_0.AuxInt)
x := v_1
if !(is32Bit(val)) {
continue
}
v.reset(OpRISCV64XORI)
- v.AuxInt = val
+ v.AuxInt = int64ToAuxInt(val)
v.AddArg(x)
return true
}
if v_1.Op != OpRISCV64MOVHconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt16(v_1.AuxInt)
v.reset(OpOr16)
v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16)
- v1.AuxInt = c & 15
+ v1.AuxInt = int16ToAuxInt(c & 15)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
v3 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16)
- v3.AuxInt = -c & 15
+ v3.AuxInt = int16ToAuxInt(-c & 15)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
if v_1.Op != OpRISCV64MOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpOr32)
v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
- v1.AuxInt = c & 31
+ v1.AuxInt = int32ToAuxInt(c & 31)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
v3 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
- v3.AuxInt = -c & 31
+ v3.AuxInt = int32ToAuxInt(-c & 31)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
if v_1.Op != OpRISCV64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpOr64)
v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
- v1.AuxInt = c & 63
+ v1.AuxInt = int64ToAuxInt(c & 63)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
- v3.AuxInt = -c & 63
+ v3.AuxInt = int64ToAuxInt(-c & 63)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
if v_1.Op != OpRISCV64MOVBconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt8(v_1.AuxInt)
v.reset(OpOr8)
v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8)
- v1.AuxInt = c & 7
+ v1.AuxInt = int8ToAuxInt(c & 7)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
v3 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8)
- v3.AuxInt = -c & 7
+ v3.AuxInt = int8ToAuxInt(-c & 7)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
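
// These hunks are the rotate-by-constant lowering: riscv64 (without
// the bit-manipulation extension) has no rotate instruction, so a
// RotateLeftN by c becomes (x << (c & (N-1))) | (x >> (-c & (N-1))),
// built from generic Or/Lsh/Rsh so the shift-clamp rules still apply.
// A sketch of the identity for N=16 (hypothetical helper):
func rotl16(x uint16, c int16) uint16 {
	return x<<(uint(c)&15) | x>>(uint(-c)&15)
}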
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpNeg16, t)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpNeg16, t)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpNeg16, t)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg2(v0, v2)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpNeg16, t)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg2(y, v2)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpNeg32, t)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpNeg32, t)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpNeg32, t)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg2(v0, v2)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpNeg32, t)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg2(y, v2)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg64, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg64, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg64, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpNeg64, t)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v0.AddArg2(y, v1)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpNeg8, t)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpNeg8, t)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpNeg8, t)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg2(v0, v2)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpNeg8, t)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg2(y, v2)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
- v2.AuxInt = -1
+ v2.AuxInt = int64ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
- v3.AuxInt = 64
+ v3.AuxInt = int64ToAuxInt(64)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
t := v.Type
x := v_0
v.reset(OpRISCV64SRAI)
- v.AuxInt = 48
+ v.AuxInt = int64ToAuxInt(48)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 48
+ v0.AuxInt = int64ToAuxInt(48)
v0.AddArg(x)
v.AddArg(v0)
return true
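// RISC-V has no single instruction for 8- or 16-bit sign extension (absent
// the Zbb extension), so SignExt16to64 lowers to a shift pair: SLLI [48]
// moves the 16-bit value to the top of the register and SRAI [48] brings it
// back down, replicating the sign bit on the way. The equivalent Go
// expression (illustrative only):
func signExt16to64(x int16) int64 { return int64(x) } // lowers to SLLI 48; SRAI 48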
t := v.Type
x := v_0
v.reset(OpRISCV64SRAI)
- v.AuxInt = 48
+ v.AuxInt = int64ToAuxInt(48)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 48
+ v0.AuxInt = int64ToAuxInt(48)
v0.AddArg(x)
v.AddArg(v0)
return true
for {
x := v_0
v.reset(OpRISCV64ADDIW)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
v.AddArg(x)
return true
}
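// SignExt32to64 is the cheap case: ADDIW performs a 32-bit add and
// sign-extends its result to 64 bits by definition, so (ADDIW [0] x) is a
// single-instruction sign extension.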
t := v.Type
x := v_0
v.reset(OpRISCV64SRAI)
- v.AuxInt = 56
+ v.AuxInt = int64ToAuxInt(56)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 56
+ v0.AuxInt = int64ToAuxInt(56)
v0.AddArg(x)
v.AddArg(v0)
return true
t := v.Type
x := v_0
v.reset(OpRISCV64SRAI)
- v.AuxInt = 56
+ v.AuxInt = int64ToAuxInt(56)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 56
+ v0.AuxInt = int64ToAuxInt(56)
v0.AddArg(x)
v.AddArg(v0)
return true
t := v.Type
x := v_0
v.reset(OpRISCV64SRAI)
- v.AuxInt = 56
+ v.AuxInt = int64ToAuxInt(56)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 56
+ v0.AuxInt = int64ToAuxInt(56)
v0.AddArg(x)
v.AddArg(v0)
return true
x := v_0
v.reset(OpRISCV64NOT)
v0 := b.NewValue0(v.Pos, OpRISCV64SRAI, t)
- v0.AuxInt = 63
+ v0.AuxInt = int64ToAuxInt(63)
v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, t)
- v1.AuxInt = -1
+ v1.AuxInt = int64ToAuxInt(-1)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
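// The conversions introduced throughout this file (int64ToAuxInt,
// auxIntToInt64, auxToType) are the glue for the strongly-typed aux scheme:
// the generated code now states which Go type an AuxInt or Aux field
// carries, instead of scattering bare int64s and type assertions at every
// use. As a rough sketch of the shape of these helpers in
// cmd/compile/internal/ssa/rewrite.go (the exact bodies there may differ):
func int64ToAuxInt(i int64) int64          { return i }
func auxIntToInt64(i int64) int64          { return i }
func auxToType(i interface{}) *types.Type  { return i.(*types.Type) }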
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 1
+ // cond: t.Size() == 1
// result: (MOVBstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 1) {
+ if !(t.Size() == 1) {
break
}
v.reset(OpRISCV64MOVBstore)
v.AddArg3(ptr, val, mem)
return true
}
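// Each of the Store cases here is generated from one line of the riscv64
// rules file; in the new typed form they read roughly as follows
// (reconstructed from the match/cond/result comments, modulo whitespace):
//
//	(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
//	(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
//	(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)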
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 2
+ // cond: t.Size() == 2
// result: (MOVHstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 2) {
+ if !(t.Size() == 2) {
break
}
v.reset(OpRISCV64MOVHstore)
v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
break
}
v.reset(OpRISCV64MOVWstore)
v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)
+ // cond: t.Size() == 8 && !is64BitFloat(val.Type)
// result: (MOVDstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) {
+ if !(t.Size() == 8 && !is64BitFloat(val.Type)) {
break
}
v.reset(OpRISCV64MOVDstore)
v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
// result: (FMOVWstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpRISCV64FMOVWstore)
v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpRISCV64FMOVDstore)
v.AddArg3(ptr, val, mem)
return true
}
// match: (Zero [0] _ mem)
// result: mem
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
mem := v_1
// match: (Zero [1] ptr mem)
// result: (MOVBstore ptr (MOVBconst) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt64(v.AuxInt) != 1 {
break
}
ptr := v_0
// match: (Zero [2] ptr mem)
// result: (MOVHstore ptr (MOVHconst) mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
ptr := v_0
// match: (Zero [4] ptr mem)
// result: (MOVWstore ptr (MOVWconst) mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
ptr := v_0
// match: (Zero [8] ptr mem)
// result: (MOVDstore ptr (MOVDconst) mem)
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
ptr := v_0
return true
}
// match: (Zero [s] {t} ptr mem)
- // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.(*types.Type).Alignment(), config)])) mem)
+ // result: (LoweredZero [t.Alignment()] ptr (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)])) mem)
for {
- s := v.AuxInt
- t := v.Aux
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
v.reset(OpRISCV64LoweredZero)
- v.AuxInt = t.(*types.Type).Alignment()
+ v.AuxInt = int64ToAuxInt(t.Alignment())
v0 := b.NewValue0(v.Pos, OpRISCV64ADD, ptr.Type)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
- v1.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
+ v1.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
v0.AddArg2(ptr, v1)
v.AddArg3(ptr, v0, mem)
return true
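// The generic Zero fallback passes LoweredZero the alignment as its AuxInt
// and precomputes the address of the last store as
// ptr + (s - moveSize(alignment, config)), where moveSize picks the widest
// store width the alignment permits. A sketch of that helper, assuming the
// usual definition in rewrite.go (the real code may differ in detail):
func moveSize(align int64, c *Config) int64 {
	switch {
	case align%8 == 0 && c.PtrSize == 8:
		return 8 // doubleword stores
	case align%4 == 0:
		return 4
	case align%2 == 0:
		return 2
	}
	return 1 // fall back to byte stores
}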
t := v.Type
x := v_0
v.reset(OpRISCV64SRLI)
- v.AuxInt = 48
+ v.AuxInt = int64ToAuxInt(48)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 48
+ v0.AuxInt = int64ToAuxInt(48)
v0.AddArg(x)
v.AddArg(v0)
return true
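// Zero extension mirrors the sign-extension lowering with a logical right
// shift in place of the arithmetic one: SLLI [48] followed by SRLI [48]
// clears the upper 48 bits. The equivalent Go expression (illustrative only):
func zeroExt16to64(x uint16) uint64 { return uint64(x) } // lowers to SLLI 48; SRLI 48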
t := v.Type
x := v_0
v.reset(OpRISCV64SRLI)
- v.AuxInt = 48
+ v.AuxInt = int64ToAuxInt(48)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 48
+ v0.AuxInt = int64ToAuxInt(48)
v0.AddArg(x)
v.AddArg(v0)
return true
t := v.Type
x := v_0
v.reset(OpRISCV64SRLI)
- v.AuxInt = 32
+ v.AuxInt = int64ToAuxInt(32)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 32
+ v0.AuxInt = int64ToAuxInt(32)
v0.AddArg(x)
v.AddArg(v0)
return true
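// ZeroExt32to64 uses the same shift pair with a width of 32. Unlike sign
// extension, there is no single-instruction shortcut here: ADDIW
// sign-extends its 32-bit result, so it cannot be reused to clear the upper
// half of the register.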
t := v.Type
x := v_0
v.reset(OpRISCV64SRLI)
- v.AuxInt = 56
+ v.AuxInt = int64ToAuxInt(56)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 56
+ v0.AuxInt = int64ToAuxInt(56)
v0.AddArg(x)
v.AddArg(v0)
return true
t := v.Type
x := v_0
v.reset(OpRISCV64SRLI)
- v.AuxInt = 56
+ v.AuxInt = int64ToAuxInt(56)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 56
+ v0.AuxInt = int64ToAuxInt(56)
v0.AddArg(x)
v.AddArg(v0)
return true
t := v.Type
x := v_0
v.reset(OpRISCV64SRLI)
- v.AuxInt = 56
+ v.AuxInt = int64ToAuxInt(56)
v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = 56
+ v0.AuxInt = int64ToAuxInt(56)
v0.AddArg(x)
v.AddArg(v0)
return true