// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-(Add(Ptr|32|16|8) ...) -> (ADD ...)
-(Add(32|64)F ...) -> (ADD(F|D) ...)
+(Add(Ptr|32|16|8) ...) => (ADD ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
-(Select0 (Add32carry <t> x y)) -> (ADD <t.FieldType(0)> x y)
-(Select1 (Add32carry <t> x y)) -> (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
-(Add32withcarry <t> x y c) -> (ADD c (ADD <t> x y))
+(Select0 (Add32carry <t> x y)) => (ADD <t.FieldType(0)> x y)
+(Select1 (Add32carry <t> x y)) => (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
+(Add32withcarry <t> x y c) => (ADD c (ADD <t> x y))
-(Sub(Ptr|32|16|8) ...) -> (SUB ...)
-(Sub(32|64)F ...) -> (SUB(F|D) ...)
+(Sub(Ptr|32|16|8) ...) => (SUB ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
-(Select0 (Sub32carry <t> x y)) -> (SUB <t.FieldType(0)> x y)
-(Select1 (Sub32carry <t> x y)) -> (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
-(Sub32withcarry <t> x y c) -> (SUB (SUB <t> x y) c)
+(Select0 (Sub32carry <t> x y)) => (SUB <t.FieldType(0)> x y)
+(Select1 (Sub32carry <t> x y)) => (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
+(Sub32withcarry <t> x y c) => (SUB (SUB <t> x y) c)
-(Mul(32|16|8) ...) -> (MUL ...)
-(Mul(32|64)F ...) -> (MUL(F|D) ...)
+(Mul(32|16|8) ...) => (MUL ...)
+(Mul(32|64)F ...) => (MUL(F|D) ...)
-(Hmul(32|32u) x y) -> (Select0 (MUL(T|TU) x y))
-(Mul32uhilo ...) -> (MULTU ...)
+(Hmul(32|32u) x y) => (Select0 (MUL(T|TU) x y))
+(Mul32uhilo ...) => (MULTU ...)
-(Div32 x y) -> (Select1 (DIV x y))
-(Div32u x y) -> (Select1 (DIVU x y))
-(Div16 x y) -> (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
-(Div16u x y) -> (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Div8 x y) -> (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
-(Div8u x y) -> (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Div(32|64)F ...) -> (DIV(F|D) ...)
+(Div32 x y) => (Select1 (DIV x y))
+(Div32u x y) => (Select1 (DIVU x y))
+(Div16 x y) => (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+(Div16u x y) => (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Div8 x y) => (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+(Div8u x y) => (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Div(32|64)F ...) => (DIV(F|D) ...)
-(Mod32 x y) -> (Select0 (DIV x y))
-(Mod32u x y) -> (Select0 (DIVU x y))
-(Mod16 x y) -> (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
-(Mod16u x y) -> (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Mod8 x y) -> (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
-(Mod8u x y) -> (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Mod32 x y) => (Select0 (DIV x y))
+(Mod32u x y) => (Select0 (DIVU x y))
+(Mod16 x y) => (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+(Mod16u x y) => (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Mod8 x y) => (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+(Mod8u x y) => (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-// (x + y) / 2 with x>=y -> (x - y) / 2 + y
-(Avg32u <t> x y) -> (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+// (x + y) / 2 with x>=y becomes (x - y) / 2 + y
+(Avg32u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
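// The naive (x + y) / 2 can overflow 32 bits; with x >= y the difference x - y
// cannot, e.g. x = 0xFFFFFFFF, y = 1 gives (0xFFFFFFFE >> 1) + 1 = 0x80000000,
// the correct unsigned average.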
-(And(32|16|8) ...) -> (AND ...)
-(Or(32|16|8) ...) -> (OR ...)
-(Xor(32|16|8) ...) -> (XOR ...)
+(And(32|16|8) ...) => (AND ...)
+(Or(32|16|8) ...) => (OR ...)
+(Xor(32|16|8) ...) => (XOR ...)
// constant shifts
// the generic opt pass rewrites all constant shifts to shifts by Const64
-(Lsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SLLconst x [c])
-(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SRAconst x [c])
-(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 -> (SRLconst x [c])
-(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SLLconst x [c])
-(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <typ.UInt32> x [16]) [c+16])
-(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <typ.UInt32> x [16]) [c+16])
-(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SLLconst x [c])
-(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <typ.UInt32> x [24]) [c+24])
-(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <typ.UInt32> x [24]) [c+24])
+(Lsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SLLconst x [int32(c)])
+(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SRAconst x [int32(c)])
+(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 => (SRLconst x [int32(c)])
+(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SLLconst x [int32(c)])
+(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 => (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SLLconst x [int32(c)])
+(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 => (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
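// For the sub-word right shifts above, the 16- or 8-bit value sits in the low
// bits of a 32-bit register with unspecified upper bits (high parts of
// registers are ignored), so it is first shifted to the top with SLLconst [16]
// or [24] and then shifted back by c+16 or c+24, which both discards the junk
// bits and extends the result correctly.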
// large constant shifts
-(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
-(Rsh32Ux64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
-(Lsh16x64 _ (Const64 [c])) && uint32(c) >= 16 -> (MOVWconst [0])
-(Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 -> (MOVWconst [0])
-(Lsh8x64 _ (Const64 [c])) && uint32(c) >= 8 -> (MOVWconst [0])
-(Rsh8Ux64 _ (Const64 [c])) && uint32(c) >= 8 -> (MOVWconst [0])
+(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0])
+(Lsh16x64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0])
+(Lsh8x64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0])
// for a large constant signed right shift, only the (replicated) sign bit is left
-(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 -> (SRAconst x [31])
-(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
-(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 => (SRAconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
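// Shifting right arithmetically by 31 replicates the sign bit across the word,
// yielding 0 for non-negative values and -1 for negative ones, which is Go's
// result for signed shifts by at least the operand width.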
// shifts
// hardware instruction uses only the low 5 bits of the shift
// we compare to 32 to ensure Go semantics for large shifts
-(Lsh32x32 <t> x y) -> (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
-(Lsh32x16 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-(Lsh32x8 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+(Lsh32x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh32x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh32x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-(Lsh16x32 <t> x y) -> (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
-(Lsh16x16 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-(Lsh16x8 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+(Lsh16x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh16x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh16x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-(Lsh8x32 <t> x y) -> (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
-(Lsh8x16 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-(Lsh8x8 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+(Lsh8x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh8x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh8x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-(Rsh32Ux32 <t> x y) -> (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
-(Rsh32Ux16 <t> x y) -> (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-(Rsh32Ux8 <t> x y) -> (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+(Rsh32Ux32 <t> x y) => (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh32Ux16 <t> x y) => (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh32Ux8 <t> x y) => (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-(Rsh16Ux32 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
-(Rsh16Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-(Rsh16Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+(Rsh16Ux32 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh16Ux16 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh16Ux8 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-(Rsh8Ux32 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
-(Rsh8Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-(Rsh8Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+(Rsh8Ux32 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh8Ux16 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh8Ux8 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
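// In these variable-shift rules, SGTUconst [32] y is 1 exactly when the
// (zero-extended) shift amount is below 32; CMOVZ keeps the hardware shift in
// that case and otherwise substitutes the MOVWconst [0], so oversized left and
// unsigned right shifts produce 0 as Go requires.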
-(Rsh32x32 x y) -> (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh32x16 x y) -> (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh32x8 x y) -> (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh32x32 x y) => (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh32x16 x y) => (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh32x8 x y) => (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
-(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh16x32 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh16x16 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh16x8 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
-(Rsh8x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh8x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh8x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh8x32 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh8x16 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh8x8 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
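// The signed variants instead patch the shift amount: for example in Rsh32x32,
// CMOVZ y (MOVWconst [-1]) (SGTUconst [32] y) passes y through when y < 32 and
// otherwise substitutes -1, and since SRA uses only the low 5 bits that is a
// shift by 31, giving the all-sign-bits result for oversized signed shifts.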
// rotates
-(RotateLeft8 <t> x (MOVWconst [c])) -> (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
-(RotateLeft16 <t> x (MOVWconst [c])) -> (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
-(RotateLeft32 <t> x (MOVWconst [c])) -> (Or32 (Lsh32x32 <t> x (MOVWconst [c&31])) (Rsh32Ux32 <t> x (MOVWconst [-c&31])))
-(RotateLeft64 <t> x (MOVWconst [c])) -> (Or64 (Lsh64x32 <t> x (MOVWconst [c&63])) (Rsh64Ux32 <t> x (MOVWconst [-c&63])))
+(RotateLeft8 <t> x (MOVWconst [c])) => (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+(RotateLeft16 <t> x (MOVWconst [c])) => (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+(RotateLeft32 <t> x (MOVWconst [c])) => (Or32 (Lsh32x32 <t> x (MOVWconst [c&31])) (Rsh32Ux32 <t> x (MOVWconst [-c&31])))
+(RotateLeft64 <t> x (MOVWconst [c])) => (Or64 (Lsh64x32 <t> x (MOVWconst [c&63])) (Rsh64Ux32 <t> x (MOVWconst [-c&63])))
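// A rotate by c is the OR of a left shift by c and a right shift by width-c;
// because -c&(width-1) equals (width-c)&(width-1), the two masked shift
// amounts above also handle c equal to a multiple of the width (both become 0
// and the OR yields x unchanged).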
// unary ops
-(Neg(32|16|8) ...) -> (NEG ...)
-(Neg(32|64)F ...) -> (NEG(F|D) ...)
+(Neg(32|16|8) ...) => (NEG ...)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
-(Com(32|16|8) x) -> (NORconst [0] x)
+(Com(32|16|8) x) => (NORconst [0] x)
-(Sqrt ...) -> (SQRTD ...)
+(Sqrt ...) => (SQRTD ...)
// TODO: optimize this case?
-(Ctz32NonZero ...) -> (Ctz32 ...)
+(Ctz32NonZero ...) => (Ctz32 ...)
// count trailing zeros
// 32 - CLZ(x&-x - 1)
-(Ctz32 <t> x) -> (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
+(Ctz32 <t> x) => (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
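// x & -x isolates the lowest set bit, and subtracting 1 turns it into a mask
// of the trailing zeros, e.g. x = 8 (0b1000): x&-x = 8, 8-1 = 0b0111,
// CLZ = 29, 32-29 = 3; for x = 0 the mask is 0xFFFFFFFF, CLZ = 0, giving 32.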
// bit length
-(BitLen32 <t> x) -> (SUB (MOVWconst [32]) (CLZ <t> x))
+(BitLen32 <t> x) => (SUB (MOVWconst [32]) (CLZ <t> x))
// boolean ops -- booleans are represented with 0=false, 1=true
-(AndB ...) -> (AND ...)
-(OrB ...) -> (OR ...)
-(EqB x y) -> (XORconst [1] (XOR <typ.Bool> x y))
-(NeqB ...) -> (XOR ...)
-(Not x) -> (XORconst [1] x)
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XORconst [1] (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
// constants
-(Const(32|16|8) ...) -> (MOVWconst ...)
-(Const(32|64)F ...) -> (MOV(F|D)const ...)
-(ConstNil) -> (MOVWconst [0])
-(ConstBool ...) -> (MOVWconst ...)
+(Const(32|16|8) [val]) => (MOVWconst [int32(val)])
+(Const(32|64)F ...) => (MOV(F|D)const ...)
+(ConstNil) => (MOVWconst [0])
+(ConstBool [b]) => (MOVWconst [int32(b2i(b))])
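// MOVWconst carries an int32 AuxInt, so the narrower Const16/Const8 values and
// the bool of ConstBool are converted explicitly (int32(val), b2i(b)) instead
// of being passed through untyped.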
// truncations
// Because we ignore high parts of registers, truncates are just copies.
-(Trunc16to8 ...) -> (Copy ...)
-(Trunc32to8 ...) -> (Copy ...)
-(Trunc32to16 ...) -> (Copy ...)
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
// Zero-/Sign-extensions
-(ZeroExt8to16 ...) -> (MOVBUreg ...)
-(ZeroExt8to32 ...) -> (MOVBUreg ...)
-(ZeroExt16to32 ...) -> (MOVHUreg ...)
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
-(SignExt8to16 ...) -> (MOVBreg ...)
-(SignExt8to32 ...) -> (MOVBreg ...)
-(SignExt16to32 ...) -> (MOVHreg ...)
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
-(Signmask x) -> (SRAconst x [31])
-(Zeromask x) -> (NEG (SGTU x (MOVWconst [0])))
-(Slicemask <t> x) -> (SRAconst (NEG <t> x) [31])
+(Signmask x) => (SRAconst x [31])
+(Zeromask x) => (NEG (SGTU x (MOVWconst [0])))
+(Slicemask <t> x) => (SRAconst (NEG <t> x) [31])
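// Signmask extracts 0 or -1 from the sign of x. Zeromask maps 0 to 0 and any
// nonzero x to -1: SGTU x 0 is 1 for nonzero x and NEG spreads it to all ones.
// Slicemask does the same via the sign bit of -x, which is set for any x > 0.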
-// float <-> int conversion
-(Cvt32to(32|64)F ...) -> (MOVW(F|D) ...)
-(Cvt(32|64)Fto32 ...) -> (TRUNC(F|D)W ...)
-(Cvt32Fto64F ...) -> (MOVFD ...)
-(Cvt64Fto32F ...) -> (MOVDF ...)
+// float-int conversion
+(Cvt32to(32|64)F ...) => (MOVW(F|D) ...)
+(Cvt(32|64)Fto32 ...) => (TRUNC(F|D)W ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
-(CvtBoolToUint8 ...) -> (Copy ...)
+(CvtBoolToUint8 ...) => (Copy ...)
-(Round(32|64)F ...) -> (Copy ...)
+(Round(32|64)F ...) => (Copy ...)
// comparisons
-(Eq8 x y) -> (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Eq16 x y) -> (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Eq32 x y) -> (SGTUconst [1] (XOR x y))
-(EqPtr x y) -> (SGTUconst [1] (XOR x y))
-(Eq(32|64)F x y) -> (FPFlagTrue (CMPEQ(F|D) x y))
-
-(Neq8 x y) -> (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
-(Neq16 x y) -> (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
-(Neq32 x y) -> (SGTU (XOR x y) (MOVWconst [0]))
-(NeqPtr x y) -> (SGTU (XOR x y) (MOVWconst [0]))
-(Neq(32|64)F x y) -> (FPFlagFalse (CMPEQ(F|D) x y))
-
-(Less8 x y) -> (SGT (SignExt8to32 y) (SignExt8to32 x))
-(Less16 x y) -> (SGT (SignExt16to32 y) (SignExt16to32 x))
-(Less32 x y) -> (SGT y x)
-(Less(32|64)F x y) -> (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
-
-(Less8U x y) -> (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
-(Less16U x y) -> (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
-(Less32U x y) -> (SGTU y x)
-
-(Leq8 x y) -> (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
-(Leq16 x y) -> (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
-(Leq32 x y) -> (XORconst [1] (SGT x y))
-(Leq(32|64)F x y) -> (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
-
-(Leq8U x y) -> (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Leq16U x y) -> (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Leq32U x y) -> (XORconst [1] (SGTU x y))
-
-(OffPtr [off] ptr:(SP)) -> (MOVWaddr [off] ptr)
-(OffPtr [off] ptr) -> (ADDconst [off] ptr)
-
-(Addr ...) -> (MOVWaddr ...)
-(LocalAddr {sym} base _) -> (MOVWaddr {sym} base)
+(Eq8 x y) => (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (SGTUconst [1] (XOR x y))
+(EqPtr x y) => (SGTUconst [1] (XOR x y))
+(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))
+
+(Neq8 x y) => (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
+(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
+(Neq32 x y) => (SGTU (XOR x y) (MOVWconst [0]))
+(NeqPtr x y) => (SGTU (XOR x y) (MOVWconst [0]))
+(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))
+
+(Less8 x y) => (SGT (SignExt8to32 y) (SignExt8to32 x))
+(Less16 x y) => (SGT (SignExt16to32 y) (SignExt16to32 x))
+(Less32 x y) => (SGT y x)
+(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
+(Less16U x y) => (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
+(Less32U x y) => (SGTU y x)
+
+(Leq8 x y) => (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (XORconst [1] (SGT x y))
+(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (XORconst [1] (SGTU x y))
+
+(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr)
+
+(Addr {sym} base) => (MOVWaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVWaddr {sym} base)
// loads
-(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
-(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
-(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
-(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
-(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
-(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) -> (MOVWload ptr mem)
-(Load <t> ptr mem) && is32BitFloat(t) -> (MOVFload ptr mem)
-(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
// stores
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem)
// zero instructions
-(Zero [0] _ mem) -> mem
-(Zero [1] ptr mem) -> (MOVBstore ptr (MOVWconst [0]) mem)
-(Zero [2] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVWconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore ptr (MOVWconst [0]) mem)
-(Zero [2] ptr mem) ->
+(Zero [2] ptr mem) =>
(MOVBstore [1] ptr (MOVWconst [0])
(MOVBstore [0] ptr (MOVWconst [0]) mem))
-(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore ptr (MOVWconst [0]) mem)
-(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore [2] ptr (MOVWconst [0])
(MOVHstore [0] ptr (MOVWconst [0]) mem))
-(Zero [4] ptr mem) ->
+(Zero [4] ptr mem) =>
(MOVBstore [3] ptr (MOVWconst [0])
(MOVBstore [2] ptr (MOVWconst [0])
(MOVBstore [1] ptr (MOVWconst [0])
(MOVBstore [0] ptr (MOVWconst [0]) mem))))
-(Zero [3] ptr mem) ->
+(Zero [3] ptr mem) =>
(MOVBstore [2] ptr (MOVWconst [0])
(MOVBstore [1] ptr (MOVWconst [0])
(MOVBstore [0] ptr (MOVWconst [0]) mem)))
-(Zero [6] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore [4] ptr (MOVWconst [0])
(MOVHstore [2] ptr (MOVWconst [0])
(MOVHstore [0] ptr (MOVWconst [0]) mem)))
-(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore [4] ptr (MOVWconst [0])
(MOVWstore [0] ptr (MOVWconst [0]) mem))
-(Zero [12] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore [8] ptr (MOVWconst [0])
(MOVWstore [4] ptr (MOVWconst [0])
(MOVWstore [0] ptr (MOVWconst [0]) mem)))
-(Zero [16] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [16] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore [12] ptr (MOVWconst [0])
(MOVWstore [8] ptr (MOVWconst [0])
(MOVWstore [4] ptr (MOVWconst [0])
// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
- && (s > 16 || t.(*types.Type).Alignment()%4 != 0) ->
- (LoweredZero [t.(*types.Type).Alignment()]
+ && (s > 16 || t.Alignment()%4 != 0) =>
+ (LoweredZero [int32(t.Alignment())]
ptr
- (ADDconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)])
+ (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))])
mem)
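// The ADDconst operand is ptr plus size minus one move unit, i.e. the address
// of the last unit to clear; the expanded LoweredZero loop presumably uses it
// as its end bound, with the AuxInt recording the alignment that determines
// the store width.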
// moves
-(Move [0] _ _ mem) -> mem
-(Move [1] dst src mem) -> (MOVBstore dst (MOVBUload src mem) mem)
-(Move [2] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
(MOVHstore dst (MOVHUload src mem) mem)
-(Move [2] dst src mem) ->
+(Move [2] dst src mem) =>
(MOVBstore [1] dst (MOVBUload [1] src mem)
(MOVBstore dst (MOVBUload src mem) mem))
-(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
(MOVWstore dst (MOVWload src mem) mem)
-(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
(MOVHstore [2] dst (MOVHUload [2] src mem)
(MOVHstore dst (MOVHUload src mem) mem))
-(Move [4] dst src mem) ->
+(Move [4] dst src mem) =>
(MOVBstore [3] dst (MOVBUload [3] src mem)
(MOVBstore [2] dst (MOVBUload [2] src mem)
(MOVBstore [1] dst (MOVBUload [1] src mem)
(MOVBstore dst (MOVBUload src mem) mem))))
-(Move [3] dst src mem) ->
+(Move [3] dst src mem) =>
(MOVBstore [2] dst (MOVBUload [2] src mem)
(MOVBstore [1] dst (MOVBUload [1] src mem)
(MOVBstore dst (MOVBUload src mem) mem)))
-(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem))
-(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
(MOVHstore [6] dst (MOVHload [6] src mem)
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem))))
-(Move [6] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem)))
-(Move [12] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
(MOVWstore [8] dst (MOVWload [8] src mem)
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem)))
-(Move [16] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Move [16] {t} dst src mem) && t.Alignment()%4 == 0 =>
(MOVWstore [12] dst (MOVWload [12] src mem)
(MOVWstore [8] dst (MOVWload [8] src mem)
(MOVWstore [4] dst (MOVWload [4] src mem)
// large or unaligned move uses a loop
(Move [s] {t} dst src mem)
- && (s > 16 && logLargeCopy(v, s) || t.(*types.Type).Alignment()%4 != 0) ->
- (LoweredMove [t.(*types.Type).Alignment()]
+ && (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) =>
+ (LoweredMove [int32(t.Alignment())]
dst
src
- (ADDconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)])
+ (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))])
mem)
// calls
-(StaticCall ...) -> (CALLstatic ...)
-(ClosureCall ...) -> (CALLclosure ...)
-(InterCall ...) -> (CALLinter ...)
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
// atomic intrinsics
-(AtomicLoad(8|32) ...) -> (LoweredAtomicLoad(8|32) ...)
-(AtomicLoadPtr ...) -> (LoweredAtomicLoad32 ...)
+(AtomicLoad(8|32) ...) => (LoweredAtomicLoad(8|32) ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad32 ...)
-(AtomicStore(8|32) ...) -> (LoweredAtomicStore(8|32) ...)
-(AtomicStorePtrNoWB ...) -> (LoweredAtomicStore32 ...)
+(AtomicStore(8|32) ...) => (LoweredAtomicStore(8|32) ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore32 ...)
-(AtomicExchange32 ...) -> (LoweredAtomicExchange ...)
-(AtomicAdd32 ...) -> (LoweredAtomicAdd ...)
+(AtomicExchange32 ...) => (LoweredAtomicExchange ...)
+(AtomicAdd32 ...) => (LoweredAtomicAdd ...)
-(AtomicCompareAndSwap32 ...) -> (LoweredAtomicCas ...)
+(AtomicCompareAndSwap32 ...) => (LoweredAtomicCas ...)
// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
(AtomicOr8 ptr val mem) && !config.BigEndian ->
v.Op = OpMIPSADD
return true
case OpAddr:
- v.Op = OpMIPSMOVWaddr
- return true
+ return rewriteValueMIPS_OpAddr(v)
case OpAnd16:
v.Op = OpMIPSAND
return true
case OpCom8:
return rewriteValueMIPS_OpCom8(v)
case OpConst16:
- v.Op = OpMIPSMOVWconst
- return true
+ return rewriteValueMIPS_OpConst16(v)
case OpConst32:
- v.Op = OpMIPSMOVWconst
- return true
+ return rewriteValueMIPS_OpConst32(v)
case OpConst32F:
v.Op = OpMIPSMOVFconst
return true
v.Op = OpMIPSMOVDconst
return true
case OpConst8:
- v.Op = OpMIPSMOVWconst
- return true
+ return rewriteValueMIPS_OpConst8(v)
case OpConstBool:
- v.Op = OpMIPSMOVWconst
- return true
+ return rewriteValueMIPS_OpConstBool(v)
case OpConstNil:
return rewriteValueMIPS_OpConstNil(v)
case OpCtz32:
return true
}
}
+func rewriteValueMIPS_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpMIPSMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
y := v_1
v.reset(OpMIPSADD)
v0 := b.NewValue0(v.Pos, OpMIPSSRLconst, t)
- v0.AuxInt = 1
+ v0.AuxInt = int32ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpMIPSSUB, t)
v1.AddArg2(x, y)
v0.AddArg(v1)
x := v_0
v.reset(OpMIPSSUB)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 32
+ v0.AuxInt = int32ToAuxInt(32)
v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
v1.AddArg(x)
v.AddArg2(v0, v1)
for {
x := v_0
v.reset(OpMIPSNORconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpMIPSNORconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v.AddArg(x)
return true
}
for {
x := v_0
v.reset(OpMIPSNORconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v.AddArg(x)
return true
}
}
+func rewriteValueMIPS_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConstBool(v *Value) bool {
+ // match: (ConstBool [b])
+ // result: (MOVWconst [int32(b2i(b))])
+ for {
+ b := auxIntToBool(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(b2i(b)))
+ return true
+ }
+}
func rewriteValueMIPS_OpConstNil(v *Value) bool {
// match: (ConstNil)
// result: (MOVWconst [0])
for {
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
}
x := v_0
v.reset(OpMIPSSUB)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 32
+ v0.AuxInt = int32ToAuxInt(32)
v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
v2 := b.NewValue0(v.Pos, OpMIPSSUBconst, t)
- v2.AuxInt = 1
+ v2.AuxInt = int32ToAuxInt(1)
v3 := b.NewValue0(v.Pos, OpMIPSAND, t)
v4 := b.NewValue0(v.Pos, OpMIPSNEG, t)
v4.AddArg(x)
x := v_0
y := v_1
v.reset(OpMIPSSGTUconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
x := v_0
y := v_1
v.reset(OpMIPSSGTUconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v0.AddArg2(x, y)
v.AddArg(v0)
x := v_0
y := v_1
v.reset(OpMIPSSGTUconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
x := v_0
y := v_1
v.reset(OpMIPSXORconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.Bool)
v0.AddArg2(x, y)
v.AddArg(v0)
x := v_0
y := v_1
v.reset(OpMIPSSGTUconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v0.AddArg2(x, y)
v.AddArg(v0)
x := v_0
y := v_1
v.reset(OpMIPSXORconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
x := v_0
y := v_1
v.reset(OpMIPSXORconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
x := v_0
y := v_1
v.reset(OpMIPSXORconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
v0.AddArg2(x, y)
v.AddArg(v0)
x := v_0
y := v_1
v.reset(OpMIPSXORconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v0.AddArg2(x, y)
v.AddArg(v0)
x := v_0
y := v_1
v.reset(OpMIPSXORconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
x := v_0
y := v_1
v.reset(OpMIPSXORconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
// match: (LocalAddr {sym} base _)
// result: (MOVWaddr {sym} base)
for {
- sym := v.Aux
+ sym := auxToSym(v.Aux)
base := v_0
v.reset(OpMIPSMOVWaddr)
- v.Aux = sym
+ v.Aux = symToAux(sym)
v.AddArg(base)
return true
}
v1.AddArg(y)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v.AddArg3(v0, v1, v2)
return true
v_0 := v.Args[0]
// match: (Lsh16x64 x (Const64 [c]))
// cond: uint32(c) < 16
- // result: (SLLconst x [c])
+ // result: (SLLconst x [int32(c)])
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 16) {
break
}
v.reset(OpMIPSSLLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg(x)
return true
}
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) >= 16) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
v1.AddArg(y)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v1.AddArg(y)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v.AddArg3(v0, v1, v2)
return true
v_0 := v.Args[0]
// match: (Lsh32x64 x (Const64 [c]))
// cond: uint32(c) < 32
- // result: (SLLconst x [c])
+ // result: (SLLconst x [int32(c)])
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 32) {
break
}
v.reset(OpMIPSSLLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg(x)
return true
}
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) >= 32) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
v1.AddArg(y)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v1.AddArg(y)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v.AddArg3(v0, v1, v2)
return true
v_0 := v.Args[0]
// match: (Lsh8x64 x (Const64 [c]))
// cond: uint32(c) < 8
- // result: (SLLconst x [c])
+ // result: (SLLconst x [int32(c)])
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 8) {
break
}
v.reset(OpMIPSSLLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg(x)
return true
}
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) >= 8) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
v1.AddArg(y)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
// match: (Move [0] _ _ mem)
// result: mem
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
mem := v_2
// match: (Move [1] dst src mem)
// result: (MOVBstore dst (MOVBUload src mem) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt64(v.AuxInt) != 1 {
break
}
dst := v_0
return true
}
// match: (Move [2] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore dst (MOVHUload src mem) mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
// match: (Move [2] dst src mem)
// result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpMIPSMOVBstore)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
- v0.AuxInt = 1
+ v0.AuxInt = int32ToAuxInt(1)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
- v0.AuxInt = 2
+ v0.AuxInt = int32ToAuxInt(2)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
// match: (Move [4] dst src mem)
// result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpMIPSMOVBstore)
- v.AuxInt = 3
+ v.AuxInt = int32ToAuxInt(3)
v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
- v0.AuxInt = 3
+ v0.AuxInt = int32ToAuxInt(3)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
- v1.AuxInt = 2
+ v1.AuxInt = int32ToAuxInt(2)
v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
- v2.AuxInt = 2
+ v2.AuxInt = int32ToAuxInt(2)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
- v3.AuxInt = 1
+ v3.AuxInt = int32ToAuxInt(1)
v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
- v4.AuxInt = 1
+ v4.AuxInt = int32ToAuxInt(1)
v4.AddArg2(src, mem)
v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
// match: (Move [3] dst src mem)
// result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
for {
- if v.AuxInt != 3 {
+ if auxIntToInt64(v.AuxInt) != 3 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpMIPSMOVBstore)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
- v0.AuxInt = 2
+ v0.AuxInt = int32ToAuxInt(2)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
- v1.AuxInt = 1
+ v1.AuxInt = int32ToAuxInt(1)
v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
- v2.AuxInt = 1
+ v2.AuxInt = int32ToAuxInt(1)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
- v0.AuxInt = 4
+ v0.AuxInt = int32ToAuxInt(4)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
- v.AuxInt = 6
+ v.AuxInt = int32ToAuxInt(6)
v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
- v0.AuxInt = 6
+ v0.AuxInt = int32ToAuxInt(6)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
- v1.AuxInt = 4
+ v1.AuxInt = int32ToAuxInt(4)
v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
- v2.AuxInt = 4
+ v2.AuxInt = int32ToAuxInt(4)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
- v3.AuxInt = 2
+ v3.AuxInt = int32ToAuxInt(2)
v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
- v4.AuxInt = 2
+ v4.AuxInt = int32ToAuxInt(2)
v4.AddArg2(src, mem)
v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
return true
}
// match: (Move [6] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
for {
- if v.AuxInt != 6 {
+ if auxIntToInt64(v.AuxInt) != 6 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
- v0.AuxInt = 4
+ v0.AuxInt = int32ToAuxInt(4)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
- v1.AuxInt = 2
+ v1.AuxInt = int32ToAuxInt(2)
v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
- v2.AuxInt = 2
+ v2.AuxInt = int32ToAuxInt(2)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
return true
}
// match: (Move [12] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
for {
- if v.AuxInt != 12 {
+ if auxIntToInt64(v.AuxInt) != 12 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
- v.AuxInt = 8
+ v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
- v0.AuxInt = 8
+ v0.AuxInt = int32ToAuxInt(8)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
- v1.AuxInt = 4
+ v1.AuxInt = int32ToAuxInt(4)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
- v2.AuxInt = 4
+ v2.AuxInt = int32ToAuxInt(4)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
return true
}
// match: (Move [16] {t} dst src mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore [12] dst (MOVWload [12] src mem) (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))))
for {
- if v.AuxInt != 16 {
+ if auxIntToInt64(v.AuxInt) != 16 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
- v.AuxInt = 12
+ v.AuxInt = int32ToAuxInt(12)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
- v0.AuxInt = 12
+ v0.AuxInt = int32ToAuxInt(12)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
- v1.AuxInt = 8
+ v1.AuxInt = int32ToAuxInt(8)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
- v2.AuxInt = 8
+ v2.AuxInt = int32ToAuxInt(8)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
- v3.AuxInt = 4
+ v3.AuxInt = int32ToAuxInt(4)
v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
- v4.AuxInt = 4
+ v4.AuxInt = int32ToAuxInt(4)
v4.AddArg2(src, mem)
v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: (s > 16 && logLargeCopy(v, s) || t.(*types.Type).Alignment()%4 != 0)
- // result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)]) mem)
+ // cond: (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0)
+ // result: (LoweredMove [int32(t.Alignment())] dst src (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))]) mem)
for {
- s := v.AuxInt
- t := v.Aux
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
- if !(s > 16 && logLargeCopy(v, s) || t.(*types.Type).Alignment()%4 != 0) {
+ if !(s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) {
break
}
v.reset(OpMIPSLoweredMove)
- v.AuxInt = t.(*types.Type).Alignment()
+ v.AuxInt = int32ToAuxInt(int32(t.Alignment()))
v0 := b.NewValue0(v.Pos, OpMIPSADDconst, src.Type)
- v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
v0.AddArg(src)
v.AddArg4(dst, src, v0, mem)
return true
v2.AddArg(y)
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v.AddArg2(v0, v3)
return true
}
v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v.AddArg2(v0, v1)
return true
}
v2.AddArg(y)
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v.AddArg2(v0, v3)
return true
}
v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v.AddArg2(v0, v1)
return true
}
for {
x := v_0
v.reset(OpMIPSXORconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v.AddArg(x)
return true
}
func rewriteValueMIPS_OpOffPtr(v *Value) bool {
v_0 := v.Args[0]
// match: (OffPtr [off] ptr:(SP))
- // result: (MOVWaddr [off] ptr)
+ // result: (MOVWaddr [int32(off)] ptr)
for {
- off := v.AuxInt
+ off := auxIntToInt64(v.AuxInt)
ptr := v_0
if ptr.Op != OpSP {
break
}
v.reset(OpMIPSMOVWaddr)
- v.AuxInt = off
+ v.AuxInt = int32ToAuxInt(int32(off))
v.AddArg(ptr)
return true
}
// match: (OffPtr [off] ptr)
- // result: (ADDconst [off] ptr)
+ // result: (ADDconst [int32(off)] ptr)
for {
- off := v.AuxInt
+ off := auxIntToInt64(v.AuxInt)
ptr := v_0
v.reset(OpMIPSADDconst)
- v.AuxInt = off
+ v.AuxInt = int32ToAuxInt(int32(off))
v.AddArg(ptr)
return true
}
if v_1.Op != OpMIPSMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpOr16)
v0 := b.NewValue0(v.Pos, OpLsh16x32, t)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = c & 15
+ v1.AuxInt = int32ToAuxInt(c & 15)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = -c & 15
+ v3.AuxInt = int32ToAuxInt(-c & 15)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
if v_1.Op != OpMIPSMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpOr32)
v0 := b.NewValue0(v.Pos, OpLsh32x32, t)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = c & 31
+ v1.AuxInt = int32ToAuxInt(c & 31)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh32Ux32, t)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = -c & 31
+ v3.AuxInt = int32ToAuxInt(-c & 31)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
if v_1.Op != OpMIPSMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpOr64)
v0 := b.NewValue0(v.Pos, OpLsh64x32, t)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = c & 63
+ v1.AuxInt = int32ToAuxInt(c & 63)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh64Ux32, t)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = -c & 63
+ v3.AuxInt = int32ToAuxInt(-c & 63)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
if v_1.Op != OpMIPSMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpOr8)
v0 := b.NewValue0(v.Pos, OpLsh8x32, t)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = c & 7
+ v1.AuxInt = int32ToAuxInt(c & 7)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = -c & 7
+ v3.AuxInt = int32ToAuxInt(-c & 7)
v2.AddArg2(x, v3)
v.AddArg2(v0, v2)
return true
v2.AddArg(y)
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v4.AuxInt = 32
+ v4.AuxInt = int32ToAuxInt(32)
v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(x)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v3.AddArg(y)
v.AddArg3(v0, v2, v3)
return true
typ := &b.Func.Config.Types
// match: (Rsh16Ux64 x (Const64 [c]))
// cond: uint32(c) < 16
- // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [c+16])
+ // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 16) {
break
}
v.reset(OpMIPSSRLconst)
- v.AuxInt = c + 16
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
- v0.AuxInt = 16
+ v0.AuxInt = int32ToAuxInt(16)
v0.AddArg(x)
v.AddArg(v0)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) >= 16) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
v2.AddArg(y)
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v4.AuxInt = 32
+ v4.AuxInt = int32ToAuxInt(32)
v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = -1
+ v3.AuxInt = int32ToAuxInt(-1)
v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v4.AuxInt = 32
+ v4.AuxInt = int32ToAuxInt(32)
v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = -1
+ v2.AuxInt = int32ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v3.AddArg(y)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
typ := &b.Func.Config.Types
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint32(c) < 16
- // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [c+16])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 16) {
break
}
v.reset(OpMIPSSRAconst)
- v.AuxInt = c + 16
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
- v0.AuxInt = 16
+ v0.AuxInt = int32ToAuxInt(16)
v0.AddArg(x)
v.AddArg(v0)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) >= 16) {
break
}
v.reset(OpMIPSSRAconst)
- v.AuxInt = 31
+ v.AuxInt = int32ToAuxInt(31)
v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
- v0.AuxInt = 16
+ v0.AuxInt = int32ToAuxInt(16)
v0.AddArg(x)
v.AddArg(v0)
return true
v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = -1
+ v3.AuxInt = int32ToAuxInt(-1)
v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v4.AuxInt = 32
+ v4.AuxInt = int32ToAuxInt(32)
v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(y)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v.AddArg3(v0, v1, v2)
return true
v_0 := v.Args[0]
// match: (Rsh32Ux64 x (Const64 [c]))
// cond: uint32(c) < 32
- // result: (SRLconst x [c])
+ // result: (SRLconst x [int32(c)])
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 32) {
break
}
v.reset(OpMIPSSRLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg(x)
return true
}
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) >= 32) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
v1.AddArg(y)
v0.AddArg2(x, v1)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = -1
+ v2.AuxInt = int32ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.reset(OpMIPSSRA)
v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = -1
+ v1.AuxInt = int32ToAuxInt(-1)
v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v0.AddArg3(y, v1, v2)
v.AddArg2(x, v0)
v_0 := v.Args[0]
// match: (Rsh32x64 x (Const64 [c]))
// cond: uint32(c) < 32
- // result: (SRAconst x [c])
+ // result: (SRAconst x [int32(c)])
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 32) {
break
}
v.reset(OpMIPSSRAconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg(x)
return true
}
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) >= 32) {
break
}
v.reset(OpMIPSSRAconst)
- v.AuxInt = 31
+ v.AuxInt = int32ToAuxInt(31)
v.AddArg(x)
return true
}
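// Clamping to a shift of 31 in the c >= 32 case above is enough because an
// arithmetic right shift by 31 already fills the whole 32-bit register with
// copies of the sign bit, which is what a right shift by any larger amount
// must produce: e.g. int32(-8)>>31 and int32(-8)>>40 are both -1, while
// int32(8)>>31 and int32(8)>>40 are both 0.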
v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = -1
+ v2.AuxInt = int32ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v4.AuxInt = 32
+ v4.AuxInt = int32ToAuxInt(32)
v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(x)
v0.AddArg2(v1, y)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v3.AddArg(y)
v.AddArg3(v0, v2, v3)
return true
typ := &b.Func.Config.Types
// match: (Rsh8Ux64 x (Const64 [c]))
// cond: uint32(c) < 8
- // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [c+24])
+ // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 8) {
break
}
v.reset(OpMIPSSRLconst)
- v.AuxInt = c + 24
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
- v0.AuxInt = 24
+ v0.AuxInt = int32ToAuxInt(24)
v0.AddArg(x)
v.AddArg(v0)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) >= 8) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
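// In the c < 8 case above, the SLL/SRL pair both discards whatever happens to
// sit in the upper 24 bits of the register and performs the unsigned shift:
// shifting left by 24 leaves only the byte, and the logical shift right by
// c+24 brings it back down already shifted by c. A small illustrative check of
// that identity (not part of the generated file):

func rsh8uxIdentityHolds(w uint32, c uint) bool {
	want := uint32(uint8(w)) >> c // what Rsh8Ux64 means for c < 8
	got := (w << 24) >> (24 + c)  // what the rewrite emits
	return want == got
}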
v2.AddArg(y)
v0.AddArg2(v1, v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v4.AuxInt = 32
+ v4.AuxInt = int32ToAuxInt(32)
v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = -1
+ v3.AuxInt = int32ToAuxInt(-1)
v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v4.AuxInt = 32
+ v4.AuxInt = int32ToAuxInt(32)
v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = -1
+ v2.AuxInt = int32ToAuxInt(-1)
v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v3.AddArg(y)
v1.AddArg3(y, v2, v3)
v.AddArg2(v0, v1)
typ := &b.Func.Config.Types
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint32(c) < 8
- // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [c+24])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) < 8) {
break
}
v.reset(OpMIPSSRAconst)
- v.AuxInt = c + 24
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
- v0.AuxInt = 24
+ v0.AuxInt = int32ToAuxInt(24)
v0.AddArg(x)
v.AddArg(v0)
return true
if v_1.Op != OpConst64 {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if !(uint32(c) >= 8) {
break
}
v.reset(OpMIPSSRAconst)
- v.AuxInt = 31
+ v.AuxInt = int32ToAuxInt(31)
v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
- v0.AuxInt = 24
+ v0.AuxInt = int32ToAuxInt(24)
v0.AddArg(x)
v.AddArg(v0)
return true
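// The signed byte shifts above use the same trick with SRA instead of SRL:
// SLLconst [24] parks the byte at the top of the register, the arithmetic
// right shift by c+24 sign-extends from bit 31 while shifting, and once
// c >= 8 the amount is clamped to 31 so only the sign bit survives.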
v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v3.AuxInt = -1
+ v3.AuxInt = int32ToAuxInt(-1)
v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
- v4.AuxInt = 32
+ v4.AuxInt = int32ToAuxInt(32)
v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
for {
x := v_0
v.reset(OpMIPSSRAconst)
- v.AuxInt = 31
+ v.AuxInt = int32ToAuxInt(31)
v.AddArg(x)
return true
}
t := v.Type
x := v_0
v.reset(OpMIPSSRAconst)
- v.AuxInt = 31
+ v.AuxInt = int32ToAuxInt(31)
v0 := b.NewValue0(v.Pos, OpMIPSNEG, t)
v0.AddArg(x)
v.AddArg(v0)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 1
+ // cond: t.Size() == 1
// result: (MOVBstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 1) {
+ if !(t.Size() == 1) {
break
}
v.reset(OpMIPSMOVBstore)
v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 2
+ // cond: t.Size() == 2
// result: (MOVHstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 2) {
+ if !(t.Size() == 2) {
break
}
v.reset(OpMIPSMOVHstore)
v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPSMOVWstore)
v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
// result: (MOVFstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPSMOVFstore)
v.AddArg3(ptr, val, mem)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
// result: (MOVDstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpMIPSMOVDstore)
v.AddArg3(ptr, val, mem)
return true
// match: (Zero [0] _ mem)
// result: mem
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
mem := v_1
// match: (Zero [1] ptr mem)
// result: (MOVBstore ptr (MOVWconst [0]) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt64(v.AuxInt) != 1 {
break
}
ptr := v_0
mem := v_1
v.reset(OpMIPSMOVBstore)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (Zero [2] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore ptr (MOVWconst [0]) mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (Zero [2] ptr mem)
// result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
ptr := v_0
mem := v_1
v.reset(OpMIPSMOVBstore)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v1.AddArg3(ptr, v2, mem)
v.AddArg3(ptr, v0, v1)
return true
}
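// Note how the unrolled Zero cases chain their stores through the memory
// argument: the innermost MOVBstore (offset 0) takes the incoming mem, and
// each enclosing store uses the inner store's result as its memory operand
// (v1.AddArg3(ptr, v2, mem) then v.AddArg3(ptr, v0, v1) above), so the stores
// are ordered from offset 0 outward.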
// match: (Zero [4] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore ptr (MOVWconst [0]) mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v1.AddArg3(ptr, v2, mem)
v.AddArg3(ptr, v0, v1)
return true
// match: (Zero [4] ptr mem)
// result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))))
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
ptr := v_0
mem := v_1
v.reset(OpMIPSMOVBstore)
- v.AuxInt = 3
+ v.AuxInt = int32ToAuxInt(3)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
- v1.AuxInt = 2
+ v1.AuxInt = int32ToAuxInt(2)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
- v3.AuxInt = 1
+ v3.AuxInt = int32ToAuxInt(1)
v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v4.AuxInt = 0
+ v4.AuxInt = int32ToAuxInt(0)
v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
- v5.AuxInt = 0
+ v5.AuxInt = int32ToAuxInt(0)
v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v6.AuxInt = 0
+ v6.AuxInt = int32ToAuxInt(0)
v5.AddArg3(ptr, v6, mem)
v3.AddArg3(ptr, v4, v5)
v1.AddArg3(ptr, v2, v3)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [3] ptr mem)
// result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))
for {
- if v.AuxInt != 3 {
+ if auxIntToInt64(v.AuxInt) != 3 {
break
}
ptr := v_0
mem := v_1
v.reset(OpMIPSMOVBstore)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
- v1.AuxInt = 1
+ v1.AuxInt = int32ToAuxInt(1)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v4.AuxInt = 0
+ v4.AuxInt = int32ToAuxInt(0)
v3.AddArg3(ptr, v4, mem)
v1.AddArg3(ptr, v2, v3)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [6] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%2 == 0
+ // cond: t.Alignment()%2 == 0
// result: (MOVHstore [4] ptr (MOVWconst [0]) (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)))
for {
- if v.AuxInt != 6 {
+ if auxIntToInt64(v.AuxInt) != 6 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%2 == 0) {
+ if !(t.Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
- v1.AuxInt = 2
+ v1.AuxInt = int32ToAuxInt(2)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v4.AuxInt = 0
+ v4.AuxInt = int32ToAuxInt(0)
v3.AddArg3(ptr, v4, mem)
v1.AddArg3(ptr, v2, v3)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [8] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v1.AddArg3(ptr, v2, mem)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [12] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem)))
for {
- if v.AuxInt != 12 {
+ if auxIntToInt64(v.AuxInt) != 12 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
- v.AuxInt = 8
+ v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
- v1.AuxInt = 4
+ v1.AuxInt = int32ToAuxInt(4)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
- v3.AuxInt = 0
+ v3.AuxInt = int32ToAuxInt(0)
v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v4.AuxInt = 0
+ v4.AuxInt = int32ToAuxInt(0)
v3.AddArg3(ptr, v4, mem)
v1.AddArg3(ptr, v2, v3)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [16] {t} ptr mem)
- // cond: t.(*types.Type).Alignment()%4 == 0
+ // cond: t.Alignment()%4 == 0
// result: (MOVWstore [12] ptr (MOVWconst [0]) (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))))
for {
- if v.AuxInt != 16 {
+ if auxIntToInt64(v.AuxInt) != 16 {
break
}
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(t.(*types.Type).Alignment()%4 == 0) {
+ if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
- v.AuxInt = 12
+ v.AuxInt = int32ToAuxInt(12)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
- v1.AuxInt = 8
+ v1.AuxInt = int32ToAuxInt(8)
v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v2.AuxInt = 0
+ v2.AuxInt = int32ToAuxInt(0)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
- v3.AuxInt = 4
+ v3.AuxInt = int32ToAuxInt(4)
v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v4.AuxInt = 0
+ v4.AuxInt = int32ToAuxInt(0)
v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
- v5.AuxInt = 0
+ v5.AuxInt = int32ToAuxInt(0)
v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v6.AuxInt = 0
+ v6.AuxInt = int32ToAuxInt(0)
v5.AddArg3(ptr, v6, mem)
v3.AddArg3(ptr, v4, v5)
v1.AddArg3(ptr, v2, v3)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: (s > 16 || t.(*types.Type).Alignment()%4 != 0)
- // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADDconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)]) mem)
+ // cond: (s > 16 || t.Alignment()%4 != 0)
+ // result: (LoweredZero [int32(t.Alignment())] ptr (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))]) mem)
for {
- s := v.AuxInt
- t := v.Aux
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(s > 16 || t.(*types.Type).Alignment()%4 != 0) {
+ if !(s > 16 || t.Alignment()%4 != 0) {
break
}
v.reset(OpMIPSLoweredZero)
- v.AuxInt = t.(*types.Type).Alignment()
+ v.AuxInt = int32ToAuxInt(int32(t.Alignment()))
v0 := b.NewValue0(v.Pos, OpMIPSADDconst, ptr.Type)
- v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
v0.AddArg(ptr)
v.AddArg3(ptr, v0, mem)
return true
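// In the catch-all case above, the second argument handed to LoweredZero is
// the address of the last element the zero loop still has to write, which is
// why the offset is s minus one store unit. A hypothetical helper showing the
// arithmetic (moveSize itself lives in rewrite.go and also looks at the
// config; the alignments that matter on mips are 1, 2 and 4):

func lastElemOffset(size, align int64) int64 {
	step := int64(1) // widest store unit the alignment allows
	switch {
	case align%4 == 0:
		step = 4
	case align%2 == 0:
		step = 2
	}
	return size - step // e.g. lastElemOffset(24, 4) == 20
}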
v.reset(OpMIPSNEG)
v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v0.AddArg2(x, v1)
v.AddArg(v0)
return true