// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-(Add(Ptr|64|32|16|8) ...) -> (ADDV ...)
-(Add(32|64)F ...) -> (ADD(F|D) ...)
+(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
-(Sub(Ptr|64|32|16|8) ...) -> (SUBV ...)
-(Sub(32|64)F ...) -> (SUB(F|D) ...)
+(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
-(Mul(64|32|16|8) x y) -> (Select1 (MULVU x y))
-(Mul(32|64)F ...) -> (MUL(F|D) ...)
-(Mul64uhilo ...) -> (MULVU ...)
+(Mul(64|32|16|8) x y) => (Select1 (MULVU x y))
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+(Mul64uhilo ...) => (MULVU ...)
(Select0 (Mul64uover x y)) -> (Select1 <typ.UInt64> (MULVU x y))
(Select1 (Mul64uover x y)) -> (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
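// MULVU yields a (hi, lo) tuple: Select1 is the low 64 bits (the product
// itself, used for Mul above) and Select0 is the high 64 bits. An unsigned
// multiply overflows exactly when the high word is nonzero, hence the
// SGTU against zero.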
-(Hmul64 x y) -> (Select0 (MULV x y))
-(Hmul64u x y) -> (Select0 (MULVU x y))
-(Hmul32 x y) -> (SRAVconst (Select1 <typ.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
-(Hmul32u x y) -> (SRLVconst (Select1 <typ.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
-
-(Div64 x y) -> (Select1 (DIVV x y))
-(Div64u x y) -> (Select1 (DIVVU x y))
-(Div32 x y) -> (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
-(Div32u x y) -> (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
-(Div16 x y) -> (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
-(Div16u x y) -> (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
-(Div8 x y) -> (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
-(Div8u x y) -> (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
-(Div(32|64)F ...) -> (DIV(F|D) ...)
-
-(Mod64 x y) -> (Select0 (DIVV x y))
-(Mod64u x y) -> (Select0 (DIVVU x y))
-(Mod32 x y) -> (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
-(Mod32u x y) -> (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
-(Mod16 x y) -> (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
-(Mod16u x y) -> (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
-(Mod8 x y) -> (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
-(Mod8u x y) -> (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Hmul64 x y) => (Select0 (MULV x y))
+(Hmul64u x y) => (Select0 (MULVU x y))
+(Hmul32 x y) => (SRAVconst (Select1 <typ.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+(Hmul32u x y) => (SRLVconst (Select1 <typ.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
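+// Hmul32 widens both operands to 64 bits; the full product then fits in the
+// low word (Select1), so an arithmetic shift by 32 extracts the high half.
+// Hmul32u is the same with zero extension and a logical shift.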
+
+(Div64 x y) => (Select1 (DIVV x y))
+(Div64u x y) => (Select1 (DIVVU x y))
+(Div32 x y) => (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+(Div32u x y) => (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Div16 x y) => (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+(Div16u x y) => (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Div8 x y) => (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+(Div8u x y) => (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Div(32|64)F ...) => (DIV(F|D) ...)
+
+(Mod64 x y) => (Select0 (DIVV x y))
+(Mod64u x y) => (Select0 (DIVVU x y))
+(Mod32 x y) => (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+(Mod32u x y) => (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Mod16 x y) => (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+(Mod16u x y) => (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
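+// DIVV/DIVVU yield a (remainder, quotient) tuple: Select1 is the quotient
+// (Div) and Select0 is the remainder (Mod).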
// (x + y) / 2 with x>=y -> (x - y) / 2 + y
(Avg64u <t> x y) -> (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
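// Rewriting this way avoids the overflow in x+y: x-y cannot wrap since
// x >= y, and (x-y)/2 + y <= x. e.g. x = 1<<64-2, y = 1<<64-4 gives
// (x-y)/2 + y = 1 + y = 1<<64-3, the correct average.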
-(And(64|32|16|8) ...) -> (AND ...)
-(Or(64|32|16|8) ...) -> (OR ...)
-(Xor(64|32|16|8) ...) -> (XOR ...)
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
// shifts
// hardware instruction uses only the low 6 bits of the shift
// we compare to 64 to ensure Go semantics for large shifts
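// SGTU(64, y) is 1 when y < 64 and 0 otherwise; NEGV turns that into an
// all-ones or all-zero mask, and the AND zeroes the hardware result for
// counts >= 64. e.g. y = 70: the mask is 0, so the whole shift is 0.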
-(Lsh64x64 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
-(Lsh64x32 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh64x16 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh64x8 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
-
-(Lsh32x64 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
-(Lsh32x32 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh32x16 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh32x8 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
-
-(Lsh16x64 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
-(Lsh16x32 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh16x16 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh16x8 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
-
-(Lsh8x64 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
-(Lsh8x32 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh8x16 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh8x8 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
-
-(Rsh64Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> x y))
-(Rsh64Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
-(Rsh64Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
-(Rsh64Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
-
-(Rsh32Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
-(Rsh32Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
-(Rsh32Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
-(Rsh32Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
-
-(Rsh16Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
-(Rsh16Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
-(Rsh16Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
-(Rsh16Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
-
-(Rsh8Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
-(Rsh8Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
-(Rsh8Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
-(Rsh8Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
-
-(Rsh64x64 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
-(Rsh64x32 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
-(Rsh64x16 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
-(Rsh64x8 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
-
-(Rsh32x64 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
-(Rsh32x32 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
-(Rsh32x16 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
-(Rsh32x8 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
-
-(Rsh16x64 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
-(Rsh16x32 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
-(Rsh16x16 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
-(Rsh16x8 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
-
-(Rsh8x64 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
-(Rsh8x32 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
-(Rsh8x16 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
-(Rsh8x8 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+(Lsh64x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh64x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh32x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh32x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh16x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh16x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh8x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh8x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Rsh64Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> x y))
+(Rsh64Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
+
+(Rsh32Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+(Rsh32Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
+
+(Rsh16Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+(Rsh16Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
+
+(Rsh8Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+(Rsh8Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+
+(Rsh64x64 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh64x32 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh64x16 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh64x8 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh32x64 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh32x32 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh32x16 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh32x8 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh16x64 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh16x32 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh16x16 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh16x8 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh8x64 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh8x32 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh8x16 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh8x8 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
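+// signed shifts can't use the AND mask above: a count >= 64 must produce
+// the sign fill, not 0. Instead the count is clamped: when y > 63,
+// NEGV(SGTU ...) is all ones, and ORing it into y gives a count whose low
+// 6 bits are 63, i.e. x >> 63.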
// rotates
-(RotateLeft8 <t> x (MOVVconst [c])) -> (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
-(RotateLeft16 <t> x (MOVVconst [c])) -> (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
-(RotateLeft32 <t> x (MOVVconst [c])) -> (Or32 (Lsh32x64 <t> x (MOVVconst [c&31])) (Rsh32Ux64 <t> x (MOVVconst [-c&31])))
-(RotateLeft64 <t> x (MOVVconst [c])) -> (Or64 (Lsh64x64 <t> x (MOVVconst [c&63])) (Rsh64Ux64 <t> x (MOVVconst [-c&63])))
+(RotateLeft8 <t> x (MOVVconst [c])) => (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
+(RotateLeft16 <t> x (MOVVconst [c])) => (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
+(RotateLeft32 <t> x (MOVVconst [c])) => (Or32 (Lsh32x64 <t> x (MOVVconst [c&31])) (Rsh32Ux64 <t> x (MOVVconst [-c&31])))
+(RotateLeft64 <t> x (MOVVconst [c])) => (Or64 (Lsh64x64 <t> x (MOVVconst [c&63])) (Rsh64Ux64 <t> x (MOVVconst [-c&63])))
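+// e.g. RotateLeft8 with c = 3 becomes (x << 3) | (x >> 5); masking the
+// counts with width-1 keeps both shifts in range.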
// unary ops
-(Neg(64|32|16|8) ...) -> (NEGV ...)
-(Neg(32|64)F ...) -> (NEG(F|D) ...)
+(Neg(64|32|16|8) ...) => (NEGV ...)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
-(Com(64|32|16|8) x) -> (NOR (MOVVconst [0]) x)
+(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)
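+// NOR with zero is bitwise complement: ^(0 | x) = ^x.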
-(Sqrt ...) -> (SQRTD ...)
+(Sqrt ...) => (SQRTD ...)
// boolean ops -- booleans are represented with 0=false, 1=true
-(AndB ...) -> (AND ...)
-(OrB ...) -> (OR ...)
-(EqB x y) -> (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
-(NeqB ...) -> (XOR ...)
-(Not x) -> (XORconst [1] x)
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
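+// since booleans are 0 or 1, XORing with 1 negates them; EqB above is the
+// negation of x != y computed the same way.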
// constants
(Const(64|32|16|8) ...) -> (MOVVconst ...)
(Const(32|64)F ...) -> (MOV(F|D)const ...)
-(ConstNil) -> (MOVVconst [0])
+(ConstNil) => (MOVVconst [0])
(ConstBool ...) -> (MOVVconst ...)
-(Slicemask <t> x) -> (SRAVconst (NEGV <t> x) [63])
+(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])
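+// Slicemask is all ones for x > 0 and zero for x == 0: NEGV makes any
+// positive x negative, and the arithmetic shift by 63 smears the sign bit
+// across the word.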
// truncations
// Because we ignore high parts of registers, truncates are just copies.
-(Trunc16to8 ...) -> (Copy ...)
-(Trunc32to8 ...) -> (Copy ...)
-(Trunc32to16 ...) -> (Copy ...)
-(Trunc64to8 ...) -> (Copy ...)
-(Trunc64to16 ...) -> (Copy ...)
-(Trunc64to32 ...) -> (Copy ...)
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
// Zero-/Sign-extensions
-(ZeroExt8to16 ...) -> (MOVBUreg ...)
-(ZeroExt8to32 ...) -> (MOVBUreg ...)
-(ZeroExt16to32 ...) -> (MOVHUreg ...)
-(ZeroExt8to64 ...) -> (MOVBUreg ...)
-(ZeroExt16to64 ...) -> (MOVHUreg ...)
-(ZeroExt32to64 ...) -> (MOVWUreg ...)
-
-(SignExt8to16 ...) -> (MOVBreg ...)
-(SignExt8to32 ...) -> (MOVBreg ...)
-(SignExt16to32 ...) -> (MOVHreg ...)
-(SignExt8to64 ...) -> (MOVBreg ...)
-(SignExt16to64 ...) -> (MOVHreg ...)
-(SignExt32to64 ...) -> (MOVWreg ...)
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
// float <-> int conversion
-(Cvt32to32F ...) -> (MOVWF ...)
-(Cvt32to64F ...) -> (MOVWD ...)
-(Cvt64to32F ...) -> (MOVVF ...)
-(Cvt64to64F ...) -> (MOVVD ...)
-(Cvt32Fto32 ...) -> (TRUNCFW ...)
-(Cvt64Fto32 ...) -> (TRUNCDW ...)
-(Cvt32Fto64 ...) -> (TRUNCFV ...)
-(Cvt64Fto64 ...) -> (TRUNCDV ...)
-(Cvt32Fto64F ...) -> (MOVFD ...)
-(Cvt64Fto32F ...) -> (MOVDF ...)
+(Cvt32to32F ...) => (MOVWF ...)
+(Cvt32to64F ...) => (MOVWD ...)
+(Cvt64to32F ...) => (MOVVF ...)
+(Cvt64to64F ...) => (MOVVD ...)
+(Cvt32Fto32 ...) => (TRUNCFW ...)
+(Cvt64Fto32 ...) => (TRUNCDW ...)
+(Cvt32Fto64 ...) => (TRUNCFV ...)
+(Cvt64Fto64 ...) => (TRUNCDV ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
-(CvtBoolToUint8 ...) -> (Copy ...)
+(CvtBoolToUint8 ...) => (Copy ...)
-(Round(32|64)F ...) -> (Copy ...)
+(Round(32|64)F ...) => (Copy ...)
// comparisons
-(Eq8 x y) -> (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
-(Eq16 x y) -> (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
-(Eq32 x y) -> (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
-(Eq64 x y) -> (SGTU (MOVVconst [1]) (XOR x y))
-(EqPtr x y) -> (SGTU (MOVVconst [1]) (XOR x y))
-(Eq(32|64)F x y) -> (FPFlagTrue (CMPEQ(F|D) x y))
-
-(Neq8 x y) -> (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
-(Neq16 x y) -> (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
-(Neq32 x y) -> (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
-(Neq64 x y) -> (SGTU (XOR x y) (MOVVconst [0]))
-(NeqPtr x y) -> (SGTU (XOR x y) (MOVVconst [0]))
-(Neq(32|64)F x y) -> (FPFlagFalse (CMPEQ(F|D) x y))
-
-(Less8 x y) -> (SGT (SignExt8to64 y) (SignExt8to64 x))
-(Less16 x y) -> (SGT (SignExt16to64 y) (SignExt16to64 x))
-(Less32 x y) -> (SGT (SignExt32to64 y) (SignExt32to64 x))
-(Less64 x y) -> (SGT y x)
-(Less(32|64)F x y) -> (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
-
-(Less8U x y) -> (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
-(Less16U x y) -> (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
-(Less32U x y) -> (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
-(Less64U x y) -> (SGTU y x)
-
-(Leq8 x y) -> (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
-(Leq16 x y) -> (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
-(Leq32 x y) -> (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
-(Leq64 x y) -> (XOR (MOVVconst [1]) (SGT x y))
-(Leq(32|64)F x y) -> (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
-
-(Leq8U x y) -> (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
-(Leq16U x y) -> (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
-(Leq32U x y) -> (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
-(Leq64U x y) -> (XOR (MOVVconst [1]) (SGTU x y))
+(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y))
+(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y))
+(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))
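+// x == y iff x^y == 0, which SGTU phrases as the unsigned test 1 > x^y.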
+
+(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
+(Neq16 x y) => (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
+(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
+(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0]))
+(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0]))
+(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))
+
+(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x))
+(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x))
+(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x))
+(Less64 x y) => (SGT y x)
+(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
+(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
+(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
+(Less64U x y) => (SGTU y x)
+
+(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
+(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
+(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
+(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y))
+(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))
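+// there is no <= instruction, so Leq is the negation of the > test:
+// x <= y iff !(x > y), with the XOR against 1 doing the negation.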
(OffPtr [off] ptr:(SP)) -> (MOVVaddr [off] ptr)
(OffPtr [off] ptr) -> (ADDVconst [off] ptr)
(LocalAddr {sym} base _) -> (MOVVaddr {sym} base)
// loads
-(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
-(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
-(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
-(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
-(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
-(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) -> (MOVWload ptr mem)
-(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) -> (MOVWUload ptr mem)
-(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVVload ptr mem)
-(Load <t> ptr mem) && is32BitFloat(t) -> (MOVFload ptr mem)
-(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
// stores
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVVstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVVstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem)
// zeroing
-(Zero [0] _ mem) -> mem
-(Zero [1] ptr mem) -> (MOVBstore ptr (MOVVconst [0]) mem)
-(Zero [2] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore ptr (MOVVconst [0]) mem)
-(Zero [2] ptr mem) ->
+(Zero [2] ptr mem) =>
(MOVBstore [1] ptr (MOVVconst [0])
(MOVBstore [0] ptr (MOVVconst [0]) mem))
-(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore ptr (MOVVconst [0]) mem)
-(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore [2] ptr (MOVVconst [0])
(MOVHstore [0] ptr (MOVVconst [0]) mem))
-(Zero [4] ptr mem) ->
+(Zero [4] ptr mem) =>
(MOVBstore [3] ptr (MOVVconst [0])
(MOVBstore [2] ptr (MOVVconst [0])
(MOVBstore [1] ptr (MOVVconst [0])
(MOVBstore [0] ptr (MOVVconst [0]) mem))))
-(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%8 == 0 ->
+(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
(MOVVstore ptr (MOVVconst [0]) mem)
-(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore [4] ptr (MOVVconst [0])
(MOVWstore [0] ptr (MOVVconst [0]) mem))
-(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore [6] ptr (MOVVconst [0])
(MOVHstore [4] ptr (MOVVconst [0])
(MOVHstore [2] ptr (MOVVconst [0])
(MOVHstore [0] ptr (MOVVconst [0]) mem))))
-(Zero [3] ptr mem) ->
+(Zero [3] ptr mem) =>
(MOVBstore [2] ptr (MOVVconst [0])
(MOVBstore [1] ptr (MOVVconst [0])
(MOVBstore [0] ptr (MOVVconst [0]) mem)))
-(Zero [6] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore [4] ptr (MOVVconst [0])
(MOVHstore [2] ptr (MOVVconst [0])
(MOVHstore [0] ptr (MOVVconst [0]) mem)))
-(Zero [12] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore [8] ptr (MOVVconst [0])
(MOVWstore [4] ptr (MOVVconst [0])
(MOVWstore [0] ptr (MOVVconst [0]) mem)))
-(Zero [16] {t} ptr mem) && t.(*types.Type).Alignment()%8 == 0 ->
+(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
(MOVVstore [8] ptr (MOVVconst [0])
(MOVVstore [0] ptr (MOVVconst [0]) mem))
-(Zero [24] {t} ptr mem) && t.(*types.Type).Alignment()%8 == 0 ->
+(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
(MOVVstore [16] ptr (MOVVconst [0])
(MOVVstore [8] ptr (MOVVconst [0])
(MOVVstore [0] ptr (MOVVconst [0]) mem)))
// 8 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
&& s%8 == 0 && s > 24 && s <= 8*128
- && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice ->
+ && t.Alignment()%8 == 0 && !config.noDuffDevice =>
(DUFFZERO [8 * (128 - s/8)] ptr mem)
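// each zeroing block in duffzero encodes to 8 bytes of instructions (see
// runtime/mkduff.go), so the entry offset 8*(128-s/8) skips all but the
// s/8 blocks that are needed. e.g. s = 32 enters at offset 8*124 = 992
// and performs 4 stores.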
// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
- && (s > 8*128 || config.noDuffDevice) || t.(*types.Type).Alignment()%8 != 0 ->
- (LoweredZero [t.(*types.Type).Alignment()]
+ && (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 =>
+ (LoweredZero [t.Alignment()]
ptr
- (ADDVconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)])
+ (ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)])
mem)
// moves
-(Move [0] _ _ mem) -> mem
-(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
-(Move [2] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
(MOVHstore dst (MOVHload src mem) mem)
-(Move [2] dst src mem) ->
+(Move [2] dst src mem) =>
(MOVBstore [1] dst (MOVBload [1] src mem)
(MOVBstore dst (MOVBload src mem) mem))
-(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
(MOVWstore dst (MOVWload src mem) mem)
-(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem))
-(Move [4] dst src mem) ->
+(Move [4] dst src mem) =>
(MOVBstore [3] dst (MOVBload [3] src mem)
(MOVBstore [2] dst (MOVBload [2] src mem)
(MOVBstore [1] dst (MOVBload [1] src mem)
(MOVBstore dst (MOVBload src mem) mem))))
-(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%8 == 0 ->
+(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
(MOVVstore dst (MOVVload src mem) mem)
-(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem))
-(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
(MOVHstore [6] dst (MOVHload [6] src mem)
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem))))
-(Move [3] dst src mem) ->
+(Move [3] dst src mem) =>
(MOVBstore [2] dst (MOVBload [2] src mem)
(MOVBstore [1] dst (MOVBload [1] src mem)
(MOVBstore dst (MOVBload src mem) mem)))
-(Move [6] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem)))
-(Move [12] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
(MOVWstore [8] dst (MOVWload [8] src mem)
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem)))
-(Move [16] {t} dst src mem) && t.(*types.Type).Alignment()%8 == 0 ->
+(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
(MOVVstore [8] dst (MOVVload [8] src mem)
(MOVVstore dst (MOVVload src mem) mem))
-(Move [24] {t} dst src mem) && t.(*types.Type).Alignment()%8 == 0 ->
+(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
(MOVVstore [16] dst (MOVVload [16] src mem)
(MOVVstore [8] dst (MOVVload [8] src mem)
(MOVVstore dst (MOVVload src mem) mem)))
// medium move uses a duff device
(Move [s] {t} dst src mem)
- && s%8 == 0 && s >= 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0
- && !config.noDuffDevice && logLargeCopy(v, s) ->
+ && s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
(DUFFCOPY [16 * (128 - s/8)] dst src mem)
// 16 and 128 are magic constants. 16 is the number of bytes to encode:
//	MOVV	(R1), R23
//	ADDV	$8, R1
//	MOVV	R23, (R2)
//	ADDV	$8, R2
// and 128 is the number of such blocks. See runtime/duff_mips64.s:duffcopy.
// large or unaligned move uses a loop
(Move [s] {t} dst src mem)
- && s > 24 && logLargeCopy(v, s) || t.(*types.Type).Alignment()%8 != 0 ->
- (LoweredMove [t.(*types.Type).Alignment()]
+ && s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 =>
+ (LoweredMove [t.Alignment()]
dst
src
- (ADDVconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)])
+ (ADDVconst <src.Type> src [s-moveSize(t.Alignment(), config)])
mem)
// calls
-(StaticCall ...) -> (CALLstatic ...)
-(ClosureCall ...) -> (CALLclosure ...)
-(InterCall ...) -> (CALLinter ...)
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
// atomic intrinsics
(AtomicLoad8 ...) -> (LoweredAtomicLoad8 ...)
(AtomicCompareAndSwap64 ...) -> (LoweredAtomicCas64 ...)
// checks
-(NilCheck ...) -> (LoweredNilCheck ...)
-(IsNonNil ptr) -> (SGTU ptr (MOVVconst [0]))
-(IsInBounds idx len) -> (SGTU len idx)
-(IsSliceInBounds idx len) -> (XOR (MOVVconst [1]) (SGTU idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (SGTU ptr (MOVVconst [0]))
+(IsInBounds idx len) => (SGTU len idx)
+(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len))
// pseudo-ops
-(GetClosurePtr ...) -> (LoweredGetClosurePtr ...)
-(GetCallerSP ...) -> (LoweredGetCallerSP ...)
-(GetCallerPC ...) -> (LoweredGetCallerPC ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
-(If cond yes no) -> (NE cond yes no)
+(If cond yes no) => (NE cond yes no)
// Write barrier.
-(WB ...) -> (LoweredWB ...)
+(WB ...) => (LoweredWB ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 -> (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
// Optimizations
// Absorb boolean tests into block
-(NE (FPFlagTrue cmp) yes no) -> (FPT cmp yes no)
-(NE (FPFlagFalse cmp) yes no) -> (FPF cmp yes no)
-(EQ (FPFlagTrue cmp) yes no) -> (FPF cmp yes no)
-(EQ (FPFlagFalse cmp) yes no) -> (FPT cmp yes no)
-(NE (XORconst [1] cmp:(SGT _ _)) yes no) -> (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTU _ _)) yes no) -> (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTconst _)) yes no) -> (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTUconst _)) yes no) -> (EQ cmp yes no)
-(EQ (XORconst [1] cmp:(SGT _ _)) yes no) -> (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) -> (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTconst _)) yes no) -> (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) -> (NE cmp yes no)
-(NE (SGTUconst [1] x) yes no) -> (EQ x yes no)
-(EQ (SGTUconst [1] x) yes no) -> (NE x yes no)
-(NE (SGTU x (MOVVconst [0])) yes no) -> (NE x yes no)
-(EQ (SGTU x (MOVVconst [0])) yes no) -> (EQ x yes no)
-(NE (SGTconst [0] x) yes no) -> (LTZ x yes no)
-(EQ (SGTconst [0] x) yes no) -> (GEZ x yes no)
-(NE (SGT x (MOVVconst [0])) yes no) -> (GTZ x yes no)
-(EQ (SGT x (MOVVconst [0])) yes no) -> (LEZ x yes no)
+(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
+(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
+(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
+(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
+(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
+(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
+(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no)
+(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no)
+(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
+(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
+(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
+(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
// fold offset into address
(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) -> (MOVVaddr [off1+off2] {sym} ptr)
(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
	(MOVVstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
// store zero
-(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
-(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
-(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
-(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) -> (MOVVstorezero [off] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVVstorezero [off] {sym} ptr mem)
// don't extend after proper load
-(MOVBreg x:(MOVBload _ _)) -> (MOVVreg x)
-(MOVBUreg x:(MOVBUload _ _)) -> (MOVVreg x)
-(MOVHreg x:(MOVBload _ _)) -> (MOVVreg x)
-(MOVHreg x:(MOVBUload _ _)) -> (MOVVreg x)
-(MOVHreg x:(MOVHload _ _)) -> (MOVVreg x)
-(MOVHUreg x:(MOVBUload _ _)) -> (MOVVreg x)
-(MOVHUreg x:(MOVHUload _ _)) -> (MOVVreg x)
-(MOVWreg x:(MOVBload _ _)) -> (MOVVreg x)
-(MOVWreg x:(MOVBUload _ _)) -> (MOVVreg x)
-(MOVWreg x:(MOVHload _ _)) -> (MOVVreg x)
-(MOVWreg x:(MOVHUload _ _)) -> (MOVVreg x)
-(MOVWreg x:(MOVWload _ _)) -> (MOVVreg x)
-(MOVWUreg x:(MOVBUload _ _)) -> (MOVVreg x)
-(MOVWUreg x:(MOVHUload _ _)) -> (MOVVreg x)
-(MOVWUreg x:(MOVWUload _ _)) -> (MOVVreg x)
+(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVVreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x)
// fold double extensions
-(MOVBreg x:(MOVBreg _)) -> (MOVVreg x)
-(MOVBUreg x:(MOVBUreg _)) -> (MOVVreg x)
-(MOVHreg x:(MOVBreg _)) -> (MOVVreg x)
-(MOVHreg x:(MOVBUreg _)) -> (MOVVreg x)
-(MOVHreg x:(MOVHreg _)) -> (MOVVreg x)
-(MOVHUreg x:(MOVBUreg _)) -> (MOVVreg x)
-(MOVHUreg x:(MOVHUreg _)) -> (MOVVreg x)
-(MOVWreg x:(MOVBreg _)) -> (MOVVreg x)
-(MOVWreg x:(MOVBUreg _)) -> (MOVVreg x)
-(MOVWreg x:(MOVHreg _)) -> (MOVVreg x)
-(MOVWreg x:(MOVWreg _)) -> (MOVVreg x)
-(MOVWUreg x:(MOVBUreg _)) -> (MOVVreg x)
-(MOVWUreg x:(MOVHUreg _)) -> (MOVVreg x)
-(MOVWUreg x:(MOVWUreg _)) -> (MOVVreg x)
+(MOVBreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVVreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x)
// don't extend before store
-(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
-(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
-(MOVHstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
-(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
-(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
-(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
// if a register move has only 1 use, just use the same register without emitting an instruction
// MOVVnop doesn't emit an instruction; it only ensures the type.
-(MOVVreg x) && x.Uses == 1 -> (MOVVnop x)
+(MOVVreg x) && x.Uses == 1 => (MOVVnop x)
// fold constant into arithmetic ops
-(ADDV x (MOVVconst [c])) && is32Bit(c) -> (ADDVconst [c] x)
-(SUBV x (MOVVconst [c])) && is32Bit(c) -> (SUBVconst [c] x)
-(AND x (MOVVconst [c])) && is32Bit(c) -> (ANDconst [c] x)
-(OR x (MOVVconst [c])) && is32Bit(c) -> (ORconst [c] x)
-(XOR x (MOVVconst [c])) && is32Bit(c) -> (XORconst [c] x)
-(NOR x (MOVVconst [c])) && is32Bit(c) -> (NORconst [c] x)
-
-(SLLV _ (MOVVconst [c])) && uint64(c)>=64 -> (MOVVconst [0])
-(SRLV _ (MOVVconst [c])) && uint64(c)>=64 -> (MOVVconst [0])
-(SRAV x (MOVVconst [c])) && uint64(c)>=64 -> (SRAVconst x [63])
-(SLLV x (MOVVconst [c])) -> (SLLVconst x [c])
-(SRLV x (MOVVconst [c])) -> (SRLVconst x [c])
-(SRAV x (MOVVconst [c])) -> (SRAVconst x [c])
-
-(SGT (MOVVconst [c]) x) && is32Bit(c) -> (SGTconst [c] x)
-(SGTU (MOVVconst [c]) x) && is32Bit(c) -> (SGTUconst [c] x)
+(ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x)
+(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
+(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
+(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
+(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
+(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)
+
+(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
+(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
+(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63])
+(SLLV x (MOVVconst [c])) => (SLLVconst x [c])
+(SRLV x (MOVVconst [c])) => (SRLVconst x [c])
+(SRAV x (MOVVconst [c])) => (SRAVconst x [c])
+
+(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x)
+(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x)
// mul by constant
-(Select1 (MULVU x (MOVVconst [-1]))) -> (NEGV x)
-(Select1 (MULVU _ (MOVVconst [0]))) -> (MOVVconst [0])
-(Select1 (MULVU x (MOVVconst [1]))) -> x
-(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo(c) -> (SLLVconst [log2(c)] x)
+(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x)
+(Select1 (MULVU _ (MOVVconst [0]))) => (MOVVconst [0])
+(Select1 (MULVU x (MOVVconst [1]))) => x
+(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (SLLVconst [log2(c)] x)
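+// e.g. x*8 becomes x<<3, since log2(8) == 3.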
// div by constant
-(Select1 (DIVVU x (MOVVconst [1]))) -> x
-(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) -> (SRLVconst [log2(c)] x)
-(Select0 (DIVVU _ (MOVVconst [1]))) -> (MOVVconst [0]) // mod
-(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) -> (ANDconst [c-1] x) // mod
+(Select1 (DIVVU x (MOVVconst [1]))) => x
+(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (SRLVconst [log2(c)] x)
+(Select0 (DIVVU _ (MOVVconst [1]))) => (MOVVconst [0]) // mod
+(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (ANDconst [c-1] x) // mod
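+// unsigned only: e.g. x/8 becomes x>>3 and x%8 becomes x&7. Signed division
+// can't use a plain shift or mask because it truncates toward zero.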
// generic simplifications
-(ADDV x (NEGV y)) -> (SUBV x y)
-(SUBV x x) -> (MOVVconst [0])
-(SUBV (MOVVconst [0]) x) -> (NEGV x)
-(AND x x) -> x
-(OR x x) -> x
-(XOR x x) -> (MOVVconst [0])
+(ADDV x (NEGV y)) => (SUBV x y)
+(SUBV x x) => (MOVVconst [0])
+(SUBV (MOVVconst [0]) x) => (NEGV x)
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVVconst [0])
// remove redundant *const ops
-(ADDVconst [0] x) -> x
-(SUBVconst [0] x) -> x
-(ANDconst [0] _) -> (MOVVconst [0])
-(ANDconst [-1] x) -> x
-(ORconst [0] x) -> x
-(ORconst [-1] _) -> (MOVVconst [-1])
-(XORconst [0] x) -> x
-(XORconst [-1] x) -> (NORconst [0] x)
+(ADDVconst [0] x) => x
+(SUBVconst [0] x) => x
+(ANDconst [0] _) => (MOVVconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVVconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (NORconst [0] x)
// generic constant folding
-(ADDVconst [c] (MOVVconst [d])) -> (MOVVconst [c+d])
-(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) -> (ADDVconst [c+d] x)
-(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) -> (ADDVconst [c-d] x)
-(SUBVconst [c] (MOVVconst [d])) -> (MOVVconst [d-c])
-(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) -> (ADDVconst [-c-d] x)
-(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) -> (ADDVconst [-c+d] x)
-(SLLVconst [c] (MOVVconst [d])) -> (MOVVconst [d<<uint64(c)])
-(SRLVconst [c] (MOVVconst [d])) -> (MOVVconst [int64(uint64(d)>>uint64(c))])
-(SRAVconst [c] (MOVVconst [d])) -> (MOVVconst [d>>uint64(c)])
-(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [c*d])
-(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [c/d])
-(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [int64(uint64(c)/uint64(d))])
-(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [c%d]) // mod
-(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
-(ANDconst [c] (MOVVconst [d])) -> (MOVVconst [c&d])
-(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
-(ORconst [c] (MOVVconst [d])) -> (MOVVconst [c|d])
-(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) -> (ORconst [c|d] x)
-(XORconst [c] (MOVVconst [d])) -> (MOVVconst [c^d])
-(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) -> (XORconst [c^d] x)
-(NORconst [c] (MOVVconst [d])) -> (MOVVconst [^(c|d)])
-(NEGV (MOVVconst [c])) -> (MOVVconst [-c])
-(MOVBreg (MOVVconst [c])) -> (MOVVconst [int64(int8(c))])
-(MOVBUreg (MOVVconst [c])) -> (MOVVconst [int64(uint8(c))])
-(MOVHreg (MOVVconst [c])) -> (MOVVconst [int64(int16(c))])
-(MOVHUreg (MOVVconst [c])) -> (MOVVconst [int64(uint16(c))])
-(MOVWreg (MOVVconst [c])) -> (MOVVconst [int64(int32(c))])
-(MOVWUreg (MOVVconst [c])) -> (MOVVconst [int64(uint32(c))])
-(MOVVreg (MOVVconst [c])) -> (MOVVconst [c])
+(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
+(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
+(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
+(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c])
+(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
+(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
+(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
+(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
+(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
+(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d])
+(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c/d])
+(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)/uint64(d))])
+(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c%d]) // mod
+(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
+(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
+(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
+(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
+(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
+(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
+(NEGV (MOVVconst [c])) => (MOVVconst [-c])
+(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
+(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
+(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))])
+(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))])
+(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
+(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
+(MOVVreg (MOVVconst [c])) => (MOVVconst [c])
(LoweredAtomicStore32 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero32 ptr mem)
(LoweredAtomicStore64 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero64 ptr mem)
(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst32 [c] ptr mem)
(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst64 [c] ptr mem)
// constant comparisons
-(SGTconst [c] (MOVVconst [d])) && c>d -> (MOVVconst [1])
-(SGTconst [c] (MOVVconst [d])) && c<=d -> (MOVVconst [0])
-(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) -> (MOVVconst [1])
-(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) -> (MOVVconst [0])
+(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])
+(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0])
+(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1])
+(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0])
// other known comparisons
-(SGTconst [c] (MOVBreg _)) && 0x7f < c -> (MOVVconst [1])
-(SGTconst [c] (MOVBreg _)) && c <= -0x80 -> (MOVVconst [0])
-(SGTconst [c] (MOVBUreg _)) && 0xff < c -> (MOVVconst [1])
-(SGTconst [c] (MOVBUreg _)) && c < 0 -> (MOVVconst [0])
-(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) -> (MOVVconst [1])
-(SGTconst [c] (MOVHreg _)) && 0x7fff < c -> (MOVVconst [1])
-(SGTconst [c] (MOVHreg _)) && c <= -0x8000 -> (MOVVconst [0])
-(SGTconst [c] (MOVHUreg _)) && 0xffff < c -> (MOVVconst [1])
-(SGTconst [c] (MOVHUreg _)) && c < 0 -> (MOVVconst [0])
-(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) -> (MOVVconst [1])
-(SGTconst [c] (MOVWUreg _)) && c < 0 -> (MOVVconst [0])
-(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c -> (MOVVconst [1])
-(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) -> (MOVVconst [1])
-(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) -> (MOVVconst [1])
-(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) -> (MOVVconst [1])
+(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1])
+(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0])
+(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1])
+(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0])
+(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1])
+(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0])
+(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1])
+(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0])
+(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0])
+(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1])
+(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
+(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
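+// e.g. a value produced by MOVBUreg is always in [0, 255], so any constant
+// above 0xff is known to be greater and any negative constant is known not
+// to be.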
// absorb constants into branches
-(EQ (MOVVconst [0]) yes no) -> (First yes no)
-(EQ (MOVVconst [c]) yes no) && c != 0 -> (First no yes)
-(NE (MOVVconst [0]) yes no) -> (First no yes)
-(NE (MOVVconst [c]) yes no) && c != 0 -> (First yes no)
-(LTZ (MOVVconst [c]) yes no) && c < 0 -> (First yes no)
-(LTZ (MOVVconst [c]) yes no) && c >= 0 -> (First no yes)
-(LEZ (MOVVconst [c]) yes no) && c <= 0 -> (First yes no)
-(LEZ (MOVVconst [c]) yes no) && c > 0 -> (First no yes)
-(GTZ (MOVVconst [c]) yes no) && c > 0 -> (First yes no)
-(GTZ (MOVVconst [c]) yes no) && c <= 0 -> (First no yes)
-(GEZ (MOVVconst [c]) yes no) && c >= 0 -> (First yes no)
-(GEZ (MOVVconst [c]) yes no) && c < 0 -> (First no yes)
+(EQ (MOVVconst [0]) yes no) => (First yes no)
+(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
+(NE (MOVVconst [0]) yes no) => (First no yes)
+(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no)
+(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no)
+(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
+(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
+(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes)
+(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no)
+(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
+(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
+(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)
x := v_0
v.reset(OpMIPS64NOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v.AddArg2(v0, x)
return true
}
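// The int64ToAuxInt calls introduced above come from the typed-aux helpers
// in cmd/compile/internal/ssa/rewrite.go. A minimal sketch of what those
// helpers are assumed to look like: identity conversions whose only job is
// to let the rule generator type-check AuxInt fields.
//
//	func int64ToAuxInt(i int64) int64 { return i }
//	func auxIntToInt64(i int64) int64 { return i }
//	func boolToAuxInt(b bool) int64 {
//		if b {
//			return 1
//		}
//		return 0
//	}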
x := v_0
v.reset(OpMIPS64NOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v.AddArg2(v0, x)
return true
}
x := v_0
v.reset(OpMIPS64NOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v.AddArg2(v0, x)
return true
}
x := v_0
v.reset(OpMIPS64NOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v.AddArg2(v0, x)
return true
}
// result: (MOVVconst [0])
for {
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
}
y := v_1
v.reset(OpMIPS64SGTU)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(x)
y := v_1
v.reset(OpMIPS64SGTU)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(x)
y := v_1
v.reset(OpMIPS64SGTU)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
v1.AddArg2(x, y)
v.AddArg2(v0, v1)
y := v_1
v.reset(OpMIPS64SGTU)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(x)
y := v_1
v.reset(OpMIPS64XOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.Bool)
v1.AddArg2(x, y)
v.AddArg2(v0, v1)
y := v_1
v.reset(OpMIPS64SGTU)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
v1.AddArg2(x, y)
v.AddArg2(v0, v1)
x := v_0
y := v_1
v.reset(OpMIPS64SRAVconst)
- v.AuxInt = 32
+ v.AuxInt = int64ToAuxInt(32)
v0 := b.NewValue0(v.Pos, OpSelect1, typ.Int64)
v1 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64))
v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
x := v_0
y := v_1
v.reset(OpMIPS64SRLVconst)
- v.AuxInt = 32
+ v.AuxInt = int64ToAuxInt(32)
v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
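The Hmul32u fragment above widens both operands, forms the full 64-bit product with MULVU, and shifts the high half down by 32. The same computation in plain Go (illustrative):

// hmul32u returns the high 32 bits of an unsigned 32x32-bit multiply.
func hmul32u(x, y uint32) uint32 {
	return uint32((uint64(x) * uint64(y)) >> 32)
}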
ptr := v_0
v.reset(OpMIPS64SGTU)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v.AddArg2(ptr, v0)
return true
}
len := v_1
v.reset(OpMIPS64XOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v1.AddArg2(idx, len)
v.AddArg2(v0, v1)
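SGTU produces 0 or 1, so XORing it with 1 negates the comparison; the bounds-check fragment above computes idx <= len as the negation of idx > len. In plain Go (illustrative):

// isSliceInBounds mirrors XOR(1, SGTU idx len).
func isSliceInBounds(idx, length uint64) bool {
	return !(idx > length) // i.e. idx <= length
}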
y := v_1
v.reset(OpMIPS64XOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v2.AddArg(x)
y := v_1
v.reset(OpMIPS64XOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(x)
y := v_1
v.reset(OpMIPS64XOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v2.AddArg(x)
y := v_1
v.reset(OpMIPS64XOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(x)
y := v_1
v.reset(OpMIPS64XOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
v1.AddArg2(x, y)
v.AddArg2(v0, v1)
y := v_1
v.reset(OpMIPS64XOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v1.AddArg2(x, y)
v.AddArg2(v0, v1)
y := v_1
v.reset(OpMIPS64XOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v2.AddArg(x)
y := v_1
v.reset(OpMIPS64XOR)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(x)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
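The repeated NEGV(SGTU (MOVVconst [64]) y) sequences in these shift lowerings build a mask: SGTU yields 0 or 1, and negating 1 gives all ones. ANDing the hardware shift result with that mask yields Go's semantics, where a shift count of 64 or more produces 0. A plain-Go sketch of the intent (an assumption about the pattern, not code from the patch):

// shiftMask is ^0 when y < 64 and 0 otherwise, matching
// NEGV(SGTU (MOVVconst [64]) y).
func shiftMask(y uint64) uint64 {
	var lt uint64
	if 64 > y {
		lt = 1
	}
	return -lt
}

// lsh gives x << y with Go semantics on hardware whose shifter
// only looks at the low 6 bits of the count.
func lsh(x, y uint64) uint64 {
	return (x << (y & 63)) & shiftMask(y)
}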
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v1.AddArg2(v2, y)
v0.AddArg(v1)
v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v1.AddArg2(v2, y)
v0.AddArg(v1)
v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v1.AddArg2(v2, y)
v0.AddArg(v1)
v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v1.AddArg2(v2, y)
v0.AddArg(v1)
v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
if v_1.Op != OpMIPS64MOVVconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(is32Bit(c)) {
continue
}
v.reset(OpMIPS64ADDVconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (ADDVconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
x := v_0
// match: (ADDVconst [c] (MOVVconst [d]))
// result: (MOVVconst [c+d])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = c + d
+ v.AuxInt = int64ToAuxInt(c + d)
return true
}
// match: (ADDVconst [c] (ADDVconst [d] x))
// cond: is32Bit(c+d)
// result: (ADDVconst [c+d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64ADDVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(c + d)) {
break
}
v.reset(OpMIPS64ADDVconst)
- v.AuxInt = c + d
+ v.AuxInt = int64ToAuxInt(c + d)
v.AddArg(x)
return true
}
// cond: is32Bit(c-d)
// result: (ADDVconst [c-d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64SUBVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(c - d)) {
break
}
v.reset(OpMIPS64ADDVconst)
- v.AuxInt = c - d
+ v.AuxInt = int64ToAuxInt(c - d)
v.AddArg(x)
return true
}
if v_1.Op != OpMIPS64MOVVconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(is32Bit(c)) {
continue
}
v.reset(OpMIPS64ANDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (ANDconst [0] _)
// result: (MOVVconst [0])
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (ANDconst [-1] x)
// result: x
for {
- if v.AuxInt != -1 {
+ if auxIntToInt64(v.AuxInt) != -1 {
break
}
x := v_0
// match: (ANDconst [c] (MOVVconst [d]))
// result: (MOVVconst [c&d])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = c & d
+ v.AuxInt = int64ToAuxInt(c & d)
return true
}
// match: (ANDconst [c] (ANDconst [d] x))
// result: (ANDconst [c&d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64ANDconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpMIPS64ANDconst)
- v.AuxInt = c & d
+ v.AuxInt = int64ToAuxInt(c & d)
v.AddArg(x)
return true
}
if v_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64(uint8(c))
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
return true
}
return false
if v_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64(int8(c))
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
return true
}
return false
// match: (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem)
// result: (MOVBstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPS64MOVBstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVBreg {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPS64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVBUreg {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPS64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVHreg {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPS64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVHUreg {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPS64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVWreg {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPS64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVWUreg {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPS64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
if v_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64(uint16(c))
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
return true
}
return false
if v_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64(int16(c))
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
return true
}
return false
// match: (MOVHstore [off] {sym} ptr (MOVVconst [0]) mem)
// result: (MOVHstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPS64MOVHstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVHreg {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPS64MOVHstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVHUreg {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPS64MOVHstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVWreg {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPS64MOVHstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVWUreg {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPS64MOVHstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
if v_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
return true
}
return false
// match: (MOVVstore [off] {sym} ptr (MOVVconst [0]) mem)
// result: (MOVVstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPS64MOVVstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
if v_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64(uint32(c))
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
return true
}
return false
if v_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64(int32(c))
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
return true
}
return false
// match: (MOVWstore [off] {sym} ptr (MOVVconst [0]) mem)
// result: (MOVWstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPS64MOVWstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
// result: (MOVWstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVWreg {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPS64MOVWstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
// result: (MOVWstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVWUreg {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPS64MOVWstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
if v_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = -c
+ v.AuxInt = int64ToAuxInt(-c)
return true
}
return false
if v_1.Op != OpMIPS64MOVVconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(is32Bit(c)) {
continue
}
v.reset(OpMIPS64NORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (NORconst [c] (MOVVconst [d]))
// result: (MOVVconst [^(c|d)])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = ^(c | d)
+ v.AuxInt = int64ToAuxInt(^(c | d))
return true
}
return false
if v_1.Op != OpMIPS64MOVVconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(is32Bit(c)) {
continue
}
v.reset(OpMIPS64ORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (ORconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
x := v_0
// match: (ORconst [-1] _)
// result: (MOVVconst [-1])
for {
- if v.AuxInt != -1 {
+ if auxIntToInt64(v.AuxInt) != -1 {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
// match: (ORconst [c] (MOVVconst [d]))
// result: (MOVVconst [c|d])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = c | d
+ v.AuxInt = int64ToAuxInt(c | d)
return true
}
// match: (ORconst [c] (ORconst [d] x))
// cond: is32Bit(c|d)
// result: (ORconst [c|d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64ORconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(c | d)) {
break
}
v.reset(OpMIPS64ORconst)
- v.AuxInt = c | d
+ v.AuxInt = int64ToAuxInt(c | d)
v.AddArg(x)
return true
}
if v_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64SGTconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64SGTUconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// cond: uint64(c)>uint64(d)
// result: (MOVVconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
if !(uint64(c) > uint64(d)) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (SGTUconst [c] (MOVVconst [d]))
// cond: uint64(c)<=uint64(d)
// result: (MOVVconst [0])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
if !(uint64(c) <= uint64(d)) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SGTUconst [c] (MOVBUreg _))
// cond: 0xff < uint64(c)
// result: (MOVVconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVBUreg || !(0xff < uint64(c)) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (SGTUconst [c] (MOVHUreg _))
// cond: 0xffff < uint64(c)
// result: (MOVVconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < uint64(c)) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (SGTUconst [c] (ANDconst [m] _))
// cond: uint64(m) < uint64(c)
// result: (MOVVconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64ANDconst {
break
}
- m := v_0.AuxInt
+ m := auxIntToInt64(v_0.AuxInt)
if !(uint64(m) < uint64(c)) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (SGTUconst [c] (SRLVconst _ [d]))
// cond: 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)
// result: (MOVVconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64SRLVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
if !(0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
return false
// cond: c>d
// result: (MOVVconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
if !(c > d) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (SGTconst [c] (MOVVconst [d]))
// cond: c<=d
// result: (MOVVconst [0])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
if !(c <= d) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SGTconst [c] (MOVBreg _))
// cond: 0x7f < c
// result: (MOVVconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVBreg || !(0x7f < c) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (SGTconst [c] (MOVBreg _))
// cond: c <= -0x80
// result: (MOVVconst [0])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVBreg || !(c <= -0x80) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SGTconst [c] (MOVBUreg _))
// cond: 0xff < c
// result: (MOVVconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVBUreg || !(0xff < c) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (SGTconst [c] (MOVBUreg _))
// cond: c < 0
// result: (MOVVconst [0])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVBUreg || !(c < 0) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SGTconst [c] (MOVHreg _))
// cond: 0x7fff < c
// result: (MOVVconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVHreg || !(0x7fff < c) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (SGTconst [c] (MOVHreg _))
// cond: c <= -0x8000
// result: (MOVVconst [0])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVHreg || !(c <= -0x8000) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SGTconst [c] (MOVHUreg _))
// cond: 0xffff < c
// result: (MOVVconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < c) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (SGTconst [c] (MOVHUreg _))
// cond: c < 0
// result: (MOVVconst [0])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVHUreg || !(c < 0) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SGTconst [c] (MOVWUreg _))
// cond: c < 0
// result: (MOVVconst [0])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVWUreg || !(c < 0) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SGTconst [c] (ANDconst [m] _))
// cond: 0 <= m && m < c
// result: (MOVVconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64ANDconst {
break
}
- m := v_0.AuxInt
+ m := auxIntToInt64(v_0.AuxInt)
if !(0 <= m && m < c) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
// match: (SGTconst [c] (SRLVconst _ [d]))
// cond: 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)
// result: (MOVVconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64SRLVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
if !(0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
return true
}
return false
if v_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 64) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SLLV x (MOVVconst [c]))
if v_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpMIPS64SLLVconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (SLLVconst [c] (MOVVconst [d]))
// result: (MOVVconst [d<<uint64(c)])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = d << uint64(c)
+ v.AuxInt = int64ToAuxInt(d << uint64(c))
return true
}
return false
if v_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 64) {
break
}
v.reset(OpMIPS64SRAVconst)
- v.AuxInt = 63
+ v.AuxInt = int64ToAuxInt(63)
v.AddArg(x)
return true
}
if v_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpMIPS64SRAVconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (SRAVconst [c] (MOVVconst [d]))
// result: (MOVVconst [d>>uint64(c)])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = d >> uint64(c)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
return true
}
return false
if v_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint64(c) >= 64) {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SRLV x (MOVVconst [c]))
if v_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpMIPS64SRLVconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (SRLVconst [c] (MOVVconst [d]))
// result: (MOVVconst [int64(uint64(d)>>uint64(c))])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64(uint64(d) >> uint64(c))
+ v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c)))
return true
}
return false
if v_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64SUBVconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SUBV (MOVVconst [0]) x)
// result: (NEGV x)
for {
- if v_0.Op != OpMIPS64MOVVconst || v_0.AuxInt != 0 {
+ if v_0.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 {
break
}
x := v_1
// match: (SUBVconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
x := v_0
// match: (SUBVconst [c] (MOVVconst [d]))
// result: (MOVVconst [d-c])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = d - c
+ v.AuxInt = int64ToAuxInt(d - c)
return true
}
// match: (SUBVconst [c] (SUBVconst [d] x))
// cond: is32Bit(-c-d)
// result: (ADDVconst [-c-d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64SUBVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(-c - d)) {
break
}
v.reset(OpMIPS64ADDVconst)
- v.AuxInt = -c - d
+ v.AuxInt = int64ToAuxInt(-c - d)
v.AddArg(x)
return true
}
// cond: is32Bit(-c+d)
// result: (ADDVconst [-c+d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64ADDVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(-c + d)) {
break
}
v.reset(OpMIPS64ADDVconst)
- v.AuxInt = -c + d
+ v.AuxInt = int64ToAuxInt(-c + d)
v.AddArg(x)
return true
}
if v_1.Op != OpMIPS64MOVVconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(is32Bit(c)) {
continue
}
v.reset(OpMIPS64XORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
// match: (XORconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
x := v_0
// match: (XORconst [-1] x)
// result: (NORconst [0] x)
for {
- if v.AuxInt != -1 {
+ if auxIntToInt64(v.AuxInt) != -1 {
break
}
x := v_0
v.reset(OpMIPS64NORconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
v.AddArg(x)
return true
}
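XOR with all-ones and NOR with zero both compute bitwise NOT, which is why the rule above canonicalizes (XORconst [-1] x) into (NORconst [0] x), a form MIPS64 can emit as a single NOR. Illustrative:

func notViaXor(x int64) int64 { return x ^ -1 }   // bitwise NOT
func notViaNor(x int64) int64 { return ^(x | 0) } // same value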
// match: (XORconst [c] (MOVVconst [d]))
// result: (MOVVconst [c^d])
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = c ^ d
+ v.AuxInt = int64ToAuxInt(c ^ d)
return true
}
// match: (XORconst [c] (XORconst [d] x))
// cond: is32Bit(c^d)
// result: (XORconst [c^d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64XORconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(c ^ d)) {
break
}
v.reset(OpMIPS64XORconst)
- v.AuxInt = c ^ d
+ v.AuxInt = int64ToAuxInt(c ^ d)
v.AddArg(x)
return true
}
// match: (Move [0] _ _ mem)
// result: mem
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
mem := v_2
// match: (Move [1] dst src mem)
// result: (MOVBstore dst (MOVBload src mem) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt64(v.AuxInt) != 1 {
break
}
dst := v_0
return true
}
// match: (Move [2] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore dst (MOVHload src mem) mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
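The Move lowerings choose the widest load/store that the copy size and the type's alignment allow, falling back to narrower chunks otherwise (the next hunks show the unaligned and byte-wise variants). A simplified sketch of that choice, offered as an illustration of the rule set rather than compiler code:

// widestChunk picks the access width the Move rules above would use.
func widestChunk(size, align int64) int64 {
	for _, w := range []int64{8, 4, 2} {
		if size%w == 0 && align%w == 0 {
			return w
		}
	}
	return 1 // byte-wise copy
}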
// match: (Move [2] dst src mem)
// result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpMIPS64MOVBstore)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
- v0.AuxInt = 1
+ v0.AuxInt = int32ToAuxInt(1)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
- v0.AuxInt = 2
+ v0.AuxInt = int32ToAuxInt(2)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
// match: (Move [4] dst src mem)
// result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))))
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpMIPS64MOVBstore)
- v.AuxInt = 3
+ v.AuxInt = int32ToAuxInt(3)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
- v0.AuxInt = 3
+ v0.AuxInt = int32ToAuxInt(3)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
- v1.AuxInt = 2
+ v1.AuxInt = int32ToAuxInt(2)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
- v2.AuxInt = 2
+ v2.AuxInt = int32ToAuxInt(2)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
- v3.AuxInt = 1
+ v3.AuxInt = int32ToAuxInt(1)
v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
- v4.AuxInt = 1
+ v4.AuxInt = int32ToAuxInt(1)
v4.AddArg2(src, mem)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v6 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%8 == 0
+ // cond: t.Alignment()%8 == 0
// result: (MOVVstore dst (MOVVload src mem) mem)
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%8 == 0) {
+ if !(t.Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
- v0.AuxInt = 4
+ v0.AuxInt = int32ToAuxInt(4)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
- v.AuxInt = 6
+ v.AuxInt = int32ToAuxInt(6)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
- v0.AuxInt = 6
+ v0.AuxInt = int32ToAuxInt(6)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
- v1.AuxInt = 4
+ v1.AuxInt = int32ToAuxInt(4)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
- v2.AuxInt = 4
+ v2.AuxInt = int32ToAuxInt(4)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
- v3.AuxInt = 2
+ v3.AuxInt = int32ToAuxInt(2)
v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
- v4.AuxInt = 2
+ v4.AuxInt = int32ToAuxInt(2)
v4.AddArg2(src, mem)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v6 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
// match: (Move [3] dst src mem)
// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))
for {
- if v.AuxInt != 3 {
+ if auxIntToInt64(v.AuxInt) != 3 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpMIPS64MOVBstore)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
- v0.AuxInt = 2
+ v0.AuxInt = int32ToAuxInt(2)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
- v1.AuxInt = 1
+ v1.AuxInt = int32ToAuxInt(1)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
- v2.AuxInt = 1
+ v2.AuxInt = int32ToAuxInt(1)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
return true
}
// match: (Move [6] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
for {
- if v.AuxInt != 6 {
+ if auxIntToInt64(v.AuxInt) != 6 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
- v0.AuxInt = 4
+ v0.AuxInt = int32ToAuxInt(4)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
- v1.AuxInt = 2
+ v1.AuxInt = int32ToAuxInt(2)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
- v2.AuxInt = 2
+ v2.AuxInt = int32ToAuxInt(2)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
return true
}
// match: (Move [12] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
for {
- if v.AuxInt != 12 {
+ if auxIntToInt64(v.AuxInt) != 12 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
- v.AuxInt = 8
+ v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
- v0.AuxInt = 8
+ v0.AuxInt = int32ToAuxInt(8)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
- v1.AuxInt = 4
+ v1.AuxInt = int32ToAuxInt(4)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
- v2.AuxInt = 4
+ v2.AuxInt = int32ToAuxInt(4)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
v4 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
return true
}
// match: (Move [16] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%8 == 0
+ // cond: t.Alignment()%8 == 0
// result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))
for {
- if v.AuxInt != 16 {
+ if auxIntToInt64(v.AuxInt) != 16 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%8 == 0) {
+ if !(t.Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
- v.AuxInt = 8
+ v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
- v0.AuxInt = 8
+ v0.AuxInt = int32ToAuxInt(8)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
return true
}
// match: (Move [24] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%8 == 0
+ // cond: t.Alignment()%8 == 0
// result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)))
for {
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%8 == 0) {
+ if !(t.Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
- v.AuxInt = 16
+ v.AuxInt = int32ToAuxInt(16)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
- v0.AuxInt = 16
+ v0.AuxInt = int32ToAuxInt(16)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
- v1.AuxInt = 8
+ v1.AuxInt = int32ToAuxInt(8)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
- v2.AuxInt = 8
+ v2.AuxInt = int32ToAuxInt(8)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
v4 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
// result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
for {
- s := v.AuxInt
- t := v.Aux
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
break
}
v.reset(OpMIPS64DUFFCOPY)
- v.AuxInt = 16 * (128 - s/8)
+ v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
v.AddArg3(dst, src, mem)
return true
}
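The DUFFCOPY AuxInt is an entry offset into the Duff's-device copy routine: a copy of s bytes takes s/8 eight-byte steps out of a maximum of 128, and the 16*(128 - s/8) formula suggests each unrolled step occupies 16 bytes of code (an inference from the rule, not stated in the diff). Worked instance:

// duffCopyEntry computes the AuxInt for an s-byte copy; the rule's
// condition restricts s to s%8 == 0 && 24 <= s <= 8*128.
func duffCopyEntry(s int64) int64 {
	return 16 * (128 - s/8)
}

// e.g. s = 64   => 16*(128-8) = 1920 (skip most of the routine)
//      s = 1024 => 16*0       = 0    (run all 128 steps)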
// match: (Move [s] {t} dst src mem)
- // cond: s > 24 && logLargeCopy(v, s) || t.(*types.Type).Alignment()%8 != 0
- // result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDVconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)]) mem)
+ // cond: s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0
+ // result: (LoweredMove [t.Alignment()] dst src (ADDVconst <src.Type> src [s-moveSize(t.Alignment(), config)]) mem)
for {
- s := v.AuxInt
- t := v.Aux
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(s > 24 && logLargeCopy(v, s) || t.(*types.Type).Alignment()%8 != 0) {
+ if !(s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0) {
break
}
v.reset(OpMIPS64LoweredMove)
- v.AuxInt = t.(*types.Type).Alignment()
+ v.AuxInt = int64ToAuxInt(t.Alignment())
v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, src.Type)
- v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
v0.AddArg(src)
v.AddArg4(dst, src, v0, mem)
return true
v2.AddArg(y)
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v3.AuxInt = 0
+ v3.AuxInt = int64ToAuxInt(0)
v.AddArg2(v0, v3)
return true
}
v2.AddArg(y)
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v3.AuxInt = 0
+ v3.AuxInt = int64ToAuxInt(0)
v.AddArg2(v0, v3)
return true
}
v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v1.AuxInt = 0
+ v1.AuxInt = int64ToAuxInt(0)
v.AddArg2(v0, v1)
return true
}
v2.AddArg(y)
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v3.AuxInt = 0
+ v3.AuxInt = int64ToAuxInt(0)
v.AddArg2(v0, v3)
return true
}
v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v1.AuxInt = 0
+ v1.AuxInt = int64ToAuxInt(0)
v.AddArg2(v0, v1)
return true
}
for {
x := v_0
v.reset(OpMIPS64XORconst)
- v.AuxInt = 1
+ v.AuxInt = int64ToAuxInt(1)
v.AddArg(x)
return true
}
// cond: boundsABI(kind) == 0
// result: (LoweredPanicBoundsA [kind] x y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
mem := v_2
if !(boundsABI(kind) == 0) {
break
}
v.reset(OpMIPS64LoweredPanicBoundsA)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg3(x, y, mem)
return true
}
// cond: boundsABI(kind) == 1
// result: (LoweredPanicBoundsB [kind] x y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
mem := v_2
if !(boundsABI(kind) == 1) {
break
}
v.reset(OpMIPS64LoweredPanicBoundsB)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg3(x, y, mem)
return true
}
// cond: boundsABI(kind) == 2
// result: (LoweredPanicBoundsC [kind] x y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
mem := v_2
if !(boundsABI(kind) == 2) {
break
}
v.reset(OpMIPS64LoweredPanicBoundsC)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg3(x, y, mem)
return true
}
if v_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpOr16)
v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v1.AuxInt = c & 15
+ v1.AuxInt = int64ToAuxInt(c & 15)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v3.AuxInt = -c & 15
+ v3.AuxInt = int64ToAuxInt(-c & 15)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
if v_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpOr32)
v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v1.AuxInt = c & 31
+ v1.AuxInt = int64ToAuxInt(c & 31)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v3.AuxInt = -c & 31
+ v3.AuxInt = int64ToAuxInt(-c & 31)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
if v_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpOr64)
v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v1.AuxInt = c & 63
+ v1.AuxInt = int64ToAuxInt(c & 63)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v3.AuxInt = -c & 63
+ v3.AuxInt = int64ToAuxInt(-c & 63)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
if v_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpOr8)
v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v1.AuxInt = c & 7
+ v1.AuxInt = int64ToAuxInt(c & 7)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v3.AuxInt = -c & 7
+ v3.AuxInt = int64ToAuxInt(-c & 7)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
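All four rotate-by-constant lowerings use the same identity: rotating left by c equals OR-ing a left shift by c&(w-1) with an unsigned right shift by -c&(w-1), the two counts summing to the width modulo w. For the 16-bit case in plain Go (illustrative):

// rot16 mirrors (Or16 (Lsh16x64 x [c&15]) (Rsh16Ux64 x [-c&15])).
func rot16(x uint16, c int64) uint16 {
	return x<<(c&15) | x>>((-c)&15)
}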
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v1.AddArg2(v2, y)
v0.AddArg(v1)
v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v5.AuxInt = 63
+ v5.AuxInt = int64ToAuxInt(63)
v3.AddArg2(v4, v5)
v2.AddArg(v3)
v1.AddArg2(v2, v4)
v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v5.AuxInt = 63
+ v5.AuxInt = int64ToAuxInt(63)
v3.AddArg2(v4, v5)
v2.AddArg(v3)
v1.AddArg2(v2, v4)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v4.AuxInt = 63
+ v4.AuxInt = int64ToAuxInt(63)
v3.AddArg2(y, v4)
v2.AddArg(v3)
v1.AddArg2(v2, y)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v5.AuxInt = 63
+ v5.AuxInt = int64ToAuxInt(63)
v3.AddArg2(v4, v5)
v2.AddArg(v3)
v1.AddArg2(v2, v4)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v1.AddArg2(v2, y)
v0.AddArg(v1)
v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v5.AuxInt = 63
+ v5.AuxInt = int64ToAuxInt(63)
v3.AddArg2(v4, v5)
v2.AddArg(v3)
v1.AddArg2(v2, v4)
v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v5.AuxInt = 63
+ v5.AuxInt = int64ToAuxInt(63)
v3.AddArg2(v4, v5)
v2.AddArg(v3)
v1.AddArg2(v2, v4)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v4.AuxInt = 63
+ v4.AuxInt = int64ToAuxInt(63)
v3.AddArg2(y, v4)
v2.AddArg(v3)
v1.AddArg2(v2, y)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v5.AuxInt = 63
+ v5.AuxInt = int64ToAuxInt(63)
v3.AddArg2(v4, v5)
v2.AddArg(v3)
v1.AddArg2(v2, v4)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v1.AddArg2(v2, y)
v0.AddArg(v1)
v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v4.AuxInt = 63
+ v4.AuxInt = int64ToAuxInt(63)
v2.AddArg2(v3, v4)
v1.AddArg(v2)
v0.AddArg2(v1, v3)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v4.AuxInt = 63
+ v4.AuxInt = int64ToAuxInt(63)
v2.AddArg2(v3, v4)
v1.AddArg(v2)
v0.AddArg2(v1, v3)
v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v3.AuxInt = 63
+ v3.AuxInt = int64ToAuxInt(63)
v2.AddArg2(y, v3)
v1.AddArg(v2)
v0.AddArg2(v1, y)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v4.AuxInt = 63
+ v4.AuxInt = int64ToAuxInt(63)
v2.AddArg2(v3, v4)
v1.AddArg(v2)
v0.AddArg2(v1, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v1.AddArg2(v2, y)
v0.AddArg(v1)
v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v2.AuxInt = 64
+ v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg2(v2, v3)
v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v5.AuxInt = 63
+ v5.AuxInt = int64ToAuxInt(63)
v3.AddArg2(v4, v5)
v2.AddArg(v3)
v1.AddArg2(v2, v4)
v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v5.AuxInt = 63
+ v5.AuxInt = int64ToAuxInt(63)
v3.AddArg2(v4, v5)
v2.AddArg(v3)
v1.AddArg2(v2, v4)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v4.AuxInt = 63
+ v4.AuxInt = int64ToAuxInt(63)
v3.AddArg2(y, v4)
v2.AddArg(v3)
v1.AddArg2(v2, y)
v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v5.AuxInt = 63
+ v5.AuxInt = int64ToAuxInt(63)
v3.AddArg2(v4, v5)
v2.AddArg(v3)
v1.AddArg2(v2, v4)
}
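These fragments come from the bounded-shift lowerings: unsigned shifts AND the result with NEGV(SGTU(64, y)) so the value collapses to zero once the amount reaches 64, while signed right shifts OR the amount with NEGV(SGTU(y, 63)) so it saturates at 63 and keeps the sign fill (SRAV only reads the low six bits). A plain-Go sketch of the two identities, with illustrative function names:

	package sketch

	// lsh64x64 mirrors AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y):
	// the mask is all-ones while y < 64 and zero otherwise.
	func lsh64x64(x, y uint64) uint64 {
		if y >= 64 {
			return 0
		}
		return x << y
	}

	// rsh64x64 mirrors SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y):
	// ORing the amount with all-ones saturates it at 63 in the low six bits.
	func rsh64x64(x int64, y uint64) int64 {
		if y > 63 {
			y = 63
		}
		return x >> y
	}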
_ = v_0.Args[1]
v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 {
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 {
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (Select0 (DIVVU x (MOVVconst [c])))
if v_0_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_0_1.AuxInt
+ c := auxIntToInt64(v_0_1.AuxInt)
if !(isPowerOfTwo(c)) {
break
}
v.reset(OpMIPS64ANDconst)
- v.AuxInt = c - 1
+ v.AuxInt = int64ToAuxInt(c - 1)
v.AddArg(x)
return true
}
if v_0_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPS64MOVVconst {
break
}
- d := v_0_1.AuxInt
+ d := auxIntToInt64(v_0_1.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = c % d
+ v.AuxInt = int64ToAuxInt(c % d)
return true
}
// match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d])))
if v_0_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPS64MOVVconst {
break
}
- d := v_0_1.AuxInt
+ d := auxIntToInt64(v_0_1.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64(uint64(c) % uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d)))
return true
}
return false
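The mechanical change running through all of these hunks is the switch from raw AuxInt/Aux accesses to typed helpers. A minimal sketch of their shape, assuming the Go 1.15-era signatures (the canonical definitions live in cmd/compile/internal/ssa/rewrite.go, and this snippet only builds inside the compiler tree):

	package sketch

	import "cmd/compile/internal/types"

	// Illustrative only; the integer conversions are identity (or widening)
	// wrappers whose whole point is to let the generated matchers type-check.
	func auxIntToInt64(i int64) int64 { return i }
	func int64ToAuxInt(i int64) int64 { return i }
	func int32ToAuxInt(i int32) int64 { return int64(i) }

	// auxToType replaces the open-coded t.(*types.Type) assertions below.
	func auxToType(i interface{}) *types.Type { return i.(*types.Type) }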
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
x := v_0_0
- if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != -1 {
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != -1 {
continue
}
v.reset(OpMIPS64NEGV)
v_0_0 := v_0.Args[0]
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 {
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
continue
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
break
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
x := v_0_0
- if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 {
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 {
continue
}
v.copyOf(x)
if v_0_1.Op != OpMIPS64MOVVconst {
continue
}
- c := v_0_1.AuxInt
+ c := auxIntToInt64(v_0_1.AuxInt)
if !(isPowerOfTwo(c)) {
continue
}
v.reset(OpMIPS64SLLVconst)
- v.AuxInt = log2(c)
+ v.AuxInt = int64ToAuxInt(log2(c))
v.AddArg(x)
return true
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 {
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 {
break
}
v.copyOf(x)
if v_0_1.Op != OpMIPS64MOVVconst {
break
}
- c := v_0_1.AuxInt
+ c := auxIntToInt64(v_0_1.AuxInt)
if !(isPowerOfTwo(c)) {
break
}
v.reset(OpMIPS64SRLVconst)
- v.AuxInt = log2(c)
+ v.AuxInt = int64ToAuxInt(log2(c))
v.AddArg(x)
return true
}
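Both power-of-two strength reductions above follow the standard identities: for unsigned x and c = 2^k, x/c is x>>k (the SRLVconst rewrite here) and x%c is x&(c-1) (the ANDconst rewrite in the Select0 case earlier). A runnable check, using bits.TrailingZeros64 to play the role of log2 for a power of two:

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		x, c := uint64(100), uint64(8)           // c is a power of two, k = 3
		k := uint(bits.TrailingZeros64(c))       // stands in for log2(c)
		fmt.Println(x>>k == x/c, x&(c-1) == x%c) // true true
	}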
if v_0_0.Op != OpMIPS64MOVVconst {
continue
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
if v_0_1.Op != OpMIPS64MOVVconst {
continue
}
- d := v_0_1.AuxInt
+ d := auxIntToInt64(v_0_1.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = c * d
+ v.AuxInt = int64ToAuxInt(c * d)
return true
}
break
if v_0_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPS64MOVVconst {
break
}
- d := v_0_1.AuxInt
+ d := auxIntToInt64(v_0_1.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = c / d
+ v.AuxInt = int64ToAuxInt(c / d)
return true
}
// match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d])))
if v_0_0.Op != OpMIPS64MOVVconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt64(v_0_0.AuxInt)
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPS64MOVVconst {
break
}
- d := v_0_1.AuxInt
+ d := auxIntToInt64(v_0_1.AuxInt)
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64(uint64(c) / uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d)))
return true
}
return false
t := v.Type
x := v_0
v.reset(OpMIPS64SRAVconst)
- v.AuxInt = 63
+ v.AuxInt = int64ToAuxInt(63)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
v0.AddArg(x)
v.AddArg(v0)
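This fragment is the Slicemask lowering, (Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63]): slice lengths are non-negative, so negating any nonzero length yields a negative value, and the arithmetic shift by 63 then smears the sign bit into a full mask. The identity on plain int64 values, with a hypothetical helper name:

	package sketch

	// slicemask is -1 for any x > 0 and 0 for x == 0; x >= 0 by construction,
	// so -x >> 63 (an arithmetic shift) smears the sign of -x.
	func slicemask(x int64) int64 { return -x >> 63 }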
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 1
+ // cond: t.Size() == 1
// result: (MOVBstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 1) {
+ if !(t.Size() == 1) {
break
}
v.reset(OpMIPS64MOVBstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 2
+ // cond: t.Size() == 2
// result: (MOVHstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 2) {
+ if !(t.Size() == 2) {
break
}
v.reset(OpMIPS64MOVHstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPS64MOVWstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)
+ // cond: t.Size() == 8 && !is64BitFloat(val.Type)
// result: (MOVVstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) {
+ if !(t.Size() == 8 && !is64BitFloat(val.Type)) {
break
}
v.reset(OpMIPS64MOVVstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
// result: (MOVFstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPS64MOVFstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
// result: (MOVDstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpMIPS64MOVDstore)
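With the typed aux, {t} on Store is a *types.Type and the six cases above dispatch purely on t.Size() plus the float-ness of the stored value, matching rules of the form (Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem). An illustrative, non-compiler restatement of the dispatch:

	package sketch

	// storeOp names the MIPS64 store op the rules above select; the isFloat
	// flag stands in for the is32BitFloat/is64BitFloat checks on val.Type.
	func storeOp(size int64, isFloat bool) string {
		switch {
		case size == 1:
			return "MOVBstore"
		case size == 2:
			return "MOVHstore"
		case size == 4 && !isFloat:
			return "MOVWstore"
		case size == 8 && !isFloat:
			return "MOVVstore"
		case size == 4:
			return "MOVFstore"
		default: // size == 8 && isFloat
			return "MOVDstore"
		}
	}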
// match: (Zero [0] _ mem)
// result: mem
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
mem := v_1
// match: (Zero [1] ptr mem)
// result: (MOVBstore ptr (MOVVconst [0]) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt64(v.AuxInt) != 1 {
break
}
ptr := v_0
mem := v_1
v.reset(OpMIPS64MOVBstore)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (Zero [2] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore ptr (MOVVconst [0]) mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (Zero [2] ptr mem)
// result: (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
ptr := v_0
mem := v_1
v.reset(OpMIPS64MOVBstore)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v1.AddArg3(ptr, v0, mem)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore ptr (MOVVconst [0]) mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v1.AddArg3(ptr, v0, mem)
v.AddArg3(ptr, v0, v1)
return true
// match: (Zero [4] ptr mem)
// result: (MOVBstore [3] ptr (MOVVconst [0]) (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))))
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
ptr := v_0
mem := v_1
v.reset(OpMIPS64MOVBstore)
- v.AuxInt = 3
+ v.AuxInt = int32ToAuxInt(3)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
- v1.AuxInt = 2
+ v1.AuxInt = int32ToAuxInt(2)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
- v2.AuxInt = 1
+ v2.AuxInt = int32ToAuxInt(1)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v3.AddArg3(ptr, v0, mem)
v2.AddArg3(ptr, v0, v3)
v1.AddArg3(ptr, v0, v2)
return true
}
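Note how these multi-store expansions chain memory: the lowest-offset store (v3 here) consumes the incoming mem, and each higher-offset store takes the previous store as its memory argument, so the stores stay ordered even though the values are built outermost-first. In effect, Zero [4] on an unaligned pointer is:

	package sketch

	// zero4 is the plain-Go effect of the MOVBstore chain above.
	func zero4(p *[4]byte) {
		p[0] = 0 // MOVBstore [0], consumes the incoming memory
		p[1] = 0 // MOVBstore [1]
		p[2] = 0 // MOVBstore [2]
		p[3] = 0 // MOVBstore [3], the outermost value in the expansion
	}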
// match: (Zero [8] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%8 == 0
+ // cond: t.Alignment()%8 == 0
// result: (MOVVstore ptr (MOVVconst [0]) mem)
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%8 == 0) {
+ if !(t.Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (Zero [8] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v1.AddArg3(ptr, v0, mem)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [8] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))))
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
- v.AuxInt = 6
+ v.AuxInt = int32ToAuxInt(6)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
- v1.AuxInt = 4
+ v1.AuxInt = int32ToAuxInt(4)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
- v2.AuxInt = 2
+ v2.AuxInt = int32ToAuxInt(2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v3.AddArg3(ptr, v0, mem)
v2.AddArg3(ptr, v0, v3)
v1.AddArg3(ptr, v0, v2)
// match: (Zero [3] ptr mem)
// result: (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))
for {
- if v.AuxInt != 3 {
+ if auxIntToInt64(v.AuxInt) != 3 {
break
}
ptr := v_0
mem := v_1
v.reset(OpMIPS64MOVBstore)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
- v1.AuxInt = 1
+ v1.AuxInt = int32ToAuxInt(1)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v2.AddArg3(ptr, v0, mem)
v1.AddArg3(ptr, v0, v2)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [6] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))
for {
- if v.AuxInt != 6 {
+ if auxIntToInt64(v.AuxInt) != 6 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
- v1.AuxInt = 2
+ v1.AuxInt = int32ToAuxInt(2)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v2.AddArg3(ptr, v0, mem)
v1.AddArg3(ptr, v0, v2)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [12] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)))
for {
- if v.AuxInt != 12 {
+ if auxIntToInt64(v.AuxInt) != 12 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
- v.AuxInt = 8
+ v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
- v1.AuxInt = 4
+ v1.AuxInt = int32ToAuxInt(4)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v2.AddArg3(ptr, v0, mem)
v1.AddArg3(ptr, v0, v2)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [16] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%8 == 0
+ // cond: t.Alignment()%8 == 0
// result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))
for {
- if v.AuxInt != 16 {
+ if auxIntToInt64(v.AuxInt) != 16 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%8 == 0) {
+ if !(t.Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
- v.AuxInt = 8
+ v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v1.AddArg3(ptr, v0, mem)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [24] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%8 == 0
+ // cond: t.Alignment()%8 == 0
// result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)))
for {
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%8 == 0) {
+ if !(t.Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
- v.AuxInt = 16
+ v.AuxInt = int32ToAuxInt(16)
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
- v1.AuxInt = 8
+ v1.AuxInt = int32ToAuxInt(8)
v2 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v2.AddArg3(ptr, v0, mem)
v1.AddArg3(ptr, v0, v2)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice
+ // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice
// result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
for {
- s := v.AuxInt
- t := v.Aux
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(s%8 == 0 && s > 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice) {
+ if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpMIPS64DUFFZERO)
- v.AuxInt = 8 * (128 - s/8)
+ v.AuxInt = int64ToAuxInt(8 * (128 - s/8))
v.AddArg2(ptr, mem)
return true
}
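The DUFFZERO offset 8 * (128 - s/8) jumps into the tail of a 128-entry Duff's device, on the assumption that each 8-byte clearing entry occupies 8 bytes of code: zeroing s bytes needs s/8 entries, so control enters 128 - s/8 entries from the start. For example:

	package sketch

	// s = 64 bytes => 8 entries needed => enter at 8*(128-8) = 960,
	// leaving exactly the final 8 of the 128 entries to execute.
	const s = 64                    // bytes to zero
	const duffOff = 8 * (128 - s/8) // = 960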
// match: (Zero [s] {t} ptr mem)
- // cond: (s > 8*128 || config.noDuffDevice) || t.(*types.Type).Alignment()%8 != 0
- // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADDVconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)]) mem)
+ // cond: (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0
+ // result: (LoweredZero [t.Alignment()] ptr (ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)]) mem)
for {
- s := v.AuxInt
- t := v.Aux
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !((s > 8*128 || config.noDuffDevice) || t.(*types.Type).Alignment()%8 != 0) {
+ if !((s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0) {
break
}
v.reset(OpMIPS64LoweredZero)
- v.AuxInt = t.(*types.Type).Alignment()
+ v.AuxInt = int64ToAuxInt(t.Alignment())
v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, ptr.Type)
- v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
v0.AddArg(ptr)
v.AddArg3(ptr, v0, mem)
return true
// result: (NE cmp yes no)
for b.Controls[0].Op == OpMIPS64XORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (NE cmp yes no)
for b.Controls[0].Op == OpMIPS64XORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (NE cmp yes no)
for b.Controls[0].Op == OpMIPS64XORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (NE cmp yes no)
for b.Controls[0].Op == OpMIPS64XORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (NE x yes no)
for b.Controls[0].Op == OpMIPS64SGTUconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
x := v_0.Args[0]
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 {
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
break
}
b.resetWithControl(BlockMIPS64EQ, x)
// result: (GEZ x yes no)
for b.Controls[0].Op == OpMIPS64SGTconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
x := v_0.Args[0]
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 {
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
break
}
b.resetWithControl(BlockMIPS64LEZ, x)
// result: (First yes no)
for b.Controls[0].Op == OpMIPS64MOVVconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
b.Reset(BlockFirst)
// result: (First no yes)
for b.Controls[0].Op == OpMIPS64MOVVconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(c != 0) {
break
}
// result: (First yes no)
for b.Controls[0].Op == OpMIPS64MOVVconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(c >= 0) {
break
}
// result: (First no yes)
for b.Controls[0].Op == OpMIPS64MOVVconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(c < 0) {
break
}
// result: (First yes no)
for b.Controls[0].Op == OpMIPS64MOVVconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(c > 0) {
break
}
// result: (First no yes)
for b.Controls[0].Op == OpMIPS64MOVVconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(c <= 0) {
break
}
// result: (First yes no)
for b.Controls[0].Op == OpMIPS64MOVVconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(c <= 0) {
break
}
// result: (First no yes)
for b.Controls[0].Op == OpMIPS64MOVVconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(c > 0) {
break
}
// result: (First yes no)
for b.Controls[0].Op == OpMIPS64MOVVconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(c < 0) {
break
}
// result: (First no yes)
for b.Controls[0].Op == OpMIPS64MOVVconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(c >= 0) {
break
}
// result: (EQ cmp yes no)
for b.Controls[0].Op == OpMIPS64XORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (EQ cmp yes no)
for b.Controls[0].Op == OpMIPS64XORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (EQ cmp yes no)
for b.Controls[0].Op == OpMIPS64XORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (EQ cmp yes no)
for b.Controls[0].Op == OpMIPS64XORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (EQ x yes no)
for b.Controls[0].Op == OpMIPS64SGTUconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
x := v_0.Args[0]
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 {
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
break
}
b.resetWithControl(BlockMIPS64NE, x)
// result: (LTZ x yes no)
for b.Controls[0].Op == OpMIPS64SGTconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
x := v_0.Args[0]
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 {
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
break
}
b.resetWithControl(BlockMIPS64GTZ, x)
// result: (First no yes)
for b.Controls[0].Op == OpMIPS64MOVVconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt64(v_0.AuxInt) != 0 {
break
}
b.Reset(BlockFirst)
// result: (First yes no)
for b.Controls[0].Op == OpMIPS64MOVVconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
if !(c != 0) {
break
}
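The block rewrites in this final stretch exploit the fact that the comparison pseudo-booleans are always 0 or 1: XORconst [1] is logical negation, so (NE (XORconst [1] cmp) yes no) folds to (EQ cmp yes no) and symmetrically for EQ; SGTUconst [1] x tests x == 0, since 1 > x unsigned only when x is zero; and a constant control collapses the block to First, swapping the edges when the constant points the other way. The negation fold on ordinary values:

	package sketch

	// cmp is always 0 or 1; branching on cmp^1 != 0 is branching on cmp == 0,
	// which is why negating the condition flips NE to EQ and back.
	func neOfNot(cmp int64) bool { return cmp^1 != 0 }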