(MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
// Strip off any fractional word zeroing.
-(Zero [s] destptr mem) && s%4 != 0 && s > 4 ->
- (Zero [s-s%4] (ADDLconst destptr [s%4])
+(Zero [s] destptr mem) && s%4 != 0 && s > 4 =>
+ (Zero [s-s%4] (ADDLconst destptr [int32(s%4)])
(MOVLstoreconst [0] destptr mem))
// Zero small numbers of words directly.
// TODO: Should the optimizations be a separate pass?
// Fold boolean tests into blocks
-(NE (TESTB (SETL cmp) (SETL cmp)) yes no) -> (LT cmp yes no)
-(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE cmp yes no)
-(NE (TESTB (SETG cmp) (SETG cmp)) yes no) -> (GT cmp yes no)
-(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE cmp yes no)
-(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) -> (EQ cmp yes no)
-(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) -> (NE cmp yes no)
-(NE (TESTB (SETB cmp) (SETB cmp)) yes no) -> (ULT cmp yes no)
-(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
-(NE (TESTB (SETA cmp) (SETA cmp)) yes no) -> (UGT cmp yes no)
-(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)
-(NE (TESTB (SETO cmp) (SETO cmp)) yes no) -> (OS cmp yes no)
+(NE (TESTB (SETL cmp) (SETL cmp)) yes no) => (LT cmp yes no)
+(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE cmp yes no)
+(NE (TESTB (SETG cmp) (SETG cmp)) yes no) => (GT cmp yes no)
+(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE cmp yes no)
+(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ cmp yes no)
+(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE cmp yes no)
+(NE (TESTB (SETB cmp) (SETB cmp)) yes no) => (ULT cmp yes no)
+(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no)
+(NE (TESTB (SETA cmp) (SETA cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no)
// Special case for floating point - LF/LEF not generated
-(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) -> (UGT cmp yes no)
-(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE cmp yes no)
-(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) -> (EQF cmp yes no)
-(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF cmp yes no)
+(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF cmp yes no)
+(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no)
// fold constants into instructions
-(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
-(ADDLcarry x (MOVLconst [c])) -> (ADDLconstcarry [c] x)
-(ADCL x (MOVLconst [c]) f) -> (ADCLconst [c] x f)
+(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)
+(ADDLcarry x (MOVLconst [c])) => (ADDLconstcarry [c] x)
+(ADCL x (MOVLconst [c]) f) => (ADCLconst [c] x f)
-(SUBL x (MOVLconst [c])) -> (SUBLconst x [c])
-(SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c]))
-(SUBLcarry x (MOVLconst [c])) -> (SUBLconstcarry [c] x)
-(SBBL x (MOVLconst [c]) f) -> (SBBLconst [c] x f)
+(SUBL x (MOVLconst [c])) => (SUBLconst x [c])
+(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst <v.Type> x [c]))
+(SUBLcarry x (MOVLconst [c])) => (SUBLconstcarry [c] x)
+(SBBL x (MOVLconst [c]) f) => (SBBLconst [c] x f)
-(MULL x (MOVLconst [c])) -> (MULLconst [c] x)
+(MULL x (MOVLconst [c])) => (MULLconst [c] x)
+(ANDL x (MOVLconst [c])) => (ANDLconst [c] x)
-(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
+(ANDLconst [c] (ANDLconst [d] x)) => (ANDLconst [c & d] x)
+(XORLconst [c] (XORLconst [d] x)) => (XORLconst [c ^ d] x)
+(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
-(ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
+(ORL x (MOVLconst [c])) => (ORLconst [c] x)
+(XORL x (MOVLconst [c])) => (XORLconst [c] x)
-(XORLconst [c] (XORLconst [d] x)) -> (XORLconst [c ^ d] x)
+(SHLL x (MOVLconst [c])) => (SHLLconst [c&31] x)
+(SHRL x (MOVLconst [c])) => (SHRLconst [c&31] x)
+(SHRW x (MOVLconst [c])) && c&31 < 16 => (SHRWconst [int16(c&31)] x)
+(SHRW _ (MOVLconst [c])) && c&31 >= 16 => (MOVLconst [0])
+(SHRB x (MOVLconst [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
+(SHRB _ (MOVLconst [c])) && c&31 >= 8 => (MOVLconst [0])
-(MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
+(SARL x (MOVLconst [c])) => (SARLconst [c&31] x)
+(SARW x (MOVLconst [c])) => (SARWconst [int16(min(int64(c&31),15))] x)
+(SARB x (MOVLconst [c])) => (SARBconst [int8(min(int64(c&31),7))] x)
-(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
-
-(XORL x (MOVLconst [c])) -> (XORLconst [c] x)
-
-(SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)
-(SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)
-(SHRW x (MOVLconst [c])) && c&31 < 16 -> (SHRWconst [c&31] x)
-(SHRW _ (MOVLconst [c])) && c&31 >= 16 -> (MOVLconst [0])
-(SHRB x (MOVLconst [c])) && c&31 < 8 -> (SHRBconst [c&31] x)
-(SHRB _ (MOVLconst [c])) && c&31 >= 8 -> (MOVLconst [0])
-
-(SARL x (MOVLconst [c])) -> (SARLconst [c&31] x)
-(SARW x (MOVLconst [c])) -> (SARWconst [min(c&31,15)] x)
-(SARB x (MOVLconst [c])) -> (SARBconst [min(c&31,7)] x)
-
-(SARL x (ANDLconst [31] y)) -> (SARL x y)
-
-(SHLL x (ANDLconst [31] y)) -> (SHLL x y)
-
-(SHRL x (ANDLconst [31] y)) -> (SHRL x y)
+(SARL x (ANDLconst [31] y)) => (SARL x y)
+(SHLL x (ANDLconst [31] y)) => (SHLL x y)
+(SHRL x (ANDLconst [31] y)) => (SHRL x y)
// Rotate instructions
-(ADDL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x)
-( ORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x)
-(XORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x)
+(ADDL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x)
+( ORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x)
+(XORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x)
+
+(ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2
+ => (ROLWconst x [int16(c)])
+( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2
+ => (ROLWconst x [int16(c)])
+(XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2
+ => (ROLWconst x [int16(c)])
-(ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c])
-( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c])
-(XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c])
+(ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1
+ => (ROLBconst x [int8(c)])
+( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1
+ => (ROLBconst x [int8(c)])
+(XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1
+ => (ROLBconst x [int8(c)])
-(ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c])
-( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c])
-(XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c])
+(ROLLconst [c] (ROLLconst [d] x)) => (ROLLconst [(c+d)&31] x)
+(ROLWconst [c] (ROLWconst [d] x)) => (ROLWconst [(c+d)&15] x)
+(ROLBconst [c] (ROLBconst [d] x)) => (ROLBconst [(c+d)& 7] x)
-(ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
-(ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
-(ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)& 7] x)
// Constant shift simplifications
-(SHLLconst x [0]) -> x
-(SHRLconst x [0]) -> x
-(SARLconst x [0]) -> x
+(SHLLconst x [0]) => x
+(SHRLconst x [0]) => x
+(SARLconst x [0]) => x
-(SHRWconst x [0]) -> x
-(SARWconst x [0]) -> x
+(SHRWconst x [0]) => x
+(SARWconst x [0]) => x
-(SHRBconst x [0]) -> x
-(SARBconst x [0]) -> x
+(SHRBconst x [0]) => x
+(SARBconst x [0]) => x
-(ROLLconst [0] x) -> x
-(ROLWconst [0] x) -> x
-(ROLBconst [0] x) -> x
+(ROLLconst [0] x) => x
+(ROLWconst [0] x) => x
+(ROLBconst [0] x) => x
// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
// (SHRW x (MOVLconst [24]))), but just in case.
-(CMPL x (MOVLconst [c])) -> (CMPLconst x [c])
-(CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c]))
-(CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))])
-(CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))]))
-(CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))])
-(CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))]))
+(CMPL x (MOVLconst [c])) => (CMPLconst x [c])
+(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c]))
+(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)])
+(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)]))
+(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
+(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
// Canonicalize the order of arguments to comparisons - helps with CSE.
-(CMP(L|W|B) x y) && x.ID > y.ID -> (InvertFlags (CMP(L|W|B) y x))
+(CMP(L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(L|W|B) y x))
// strength reduction
// Assumes instruction costs from https://gmplib.org/~tege/x86-timing.pdf.
// Note that negl always operates in-place,
// which can require a register-register move to preserve the original value,
// so it must be used with care.
-(MULLconst [-9] x) -> (NEGL (LEAL8 <v.Type> x x))
-(MULLconst [-5] x) -> (NEGL (LEAL4 <v.Type> x x))
-(MULLconst [-3] x) -> (NEGL (LEAL2 <v.Type> x x))
-(MULLconst [-1] x) -> (NEGL x)
-(MULLconst [0] _) -> (MOVLconst [0])
-(MULLconst [1] x) -> x
-(MULLconst [3] x) -> (LEAL2 x x)
-(MULLconst [5] x) -> (LEAL4 x x)
-(MULLconst [7] x) -> (LEAL2 x (LEAL2 <v.Type> x x))
-(MULLconst [9] x) -> (LEAL8 x x)
-(MULLconst [11] x) -> (LEAL2 x (LEAL4 <v.Type> x x))
-(MULLconst [13] x) -> (LEAL4 x (LEAL2 <v.Type> x x))
-(MULLconst [19] x) -> (LEAL2 x (LEAL8 <v.Type> x x))
-(MULLconst [21] x) -> (LEAL4 x (LEAL4 <v.Type> x x))
-(MULLconst [25] x) -> (LEAL8 x (LEAL2 <v.Type> x x))
-(MULLconst [27] x) -> (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
-(MULLconst [37] x) -> (LEAL4 x (LEAL8 <v.Type> x x))
-(MULLconst [41] x) -> (LEAL8 x (LEAL4 <v.Type> x x))
-(MULLconst [45] x) -> (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
-(MULLconst [73] x) -> (LEAL8 x (LEAL8 <v.Type> x x))
-(MULLconst [81] x) -> (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
-
-(MULLconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x)
-(MULLconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x)
-(MULLconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x)
-(MULLconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x)
-(MULLconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x)
-(MULLconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))
-(MULLconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))
-(MULLconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))
+(MULLconst [-9] x) => (NEGL (LEAL8 <v.Type> x x))
+(MULLconst [-5] x) => (NEGL (LEAL4 <v.Type> x x))
+(MULLconst [-3] x) => (NEGL (LEAL2 <v.Type> x x))
+(MULLconst [-1] x) => (NEGL x)
+(MULLconst [0] _) => (MOVLconst [0])
+(MULLconst [1] x) => x
+(MULLconst [3] x) => (LEAL2 x x)
+(MULLconst [5] x) => (LEAL4 x x)
+(MULLconst [7] x) => (LEAL2 x (LEAL2 <v.Type> x x))
+(MULLconst [9] x) => (LEAL8 x x)
+(MULLconst [11] x) => (LEAL2 x (LEAL4 <v.Type> x x))
+(MULLconst [13] x) => (LEAL4 x (LEAL2 <v.Type> x x))
+(MULLconst [19] x) => (LEAL2 x (LEAL8 <v.Type> x x))
+(MULLconst [21] x) => (LEAL4 x (LEAL4 <v.Type> x x))
+(MULLconst [25] x) => (LEAL8 x (LEAL2 <v.Type> x x))
+(MULLconst [27] x) => (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
+(MULLconst [37] x) => (LEAL4 x (LEAL8 <v.Type> x x))
+(MULLconst [41] x) => (LEAL8 x (LEAL4 <v.Type> x x))
+(MULLconst [45] x) => (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
+(MULLconst [73] x) => (LEAL8 x (LEAL8 <v.Type> x x))
+(MULLconst [81] x) => (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
+
+(MULLconst [c] x) && isPowerOfTwo32(c+1) && c >= 15 => (SUBL (SHLLconst <v.Type> [int32(log32(c+1))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEAL1 (SHLLconst <v.Type> [int32(log32(c-1))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEAL2 (SHLLconst <v.Type> [int32(log32(c-2))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEAL4 (SHLLconst <v.Type> [int32(log32(c-4))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-8) && c >= 136 => (LEAL8 (SHLLconst <v.Type> [int32(log32(c-8))] x) x)
+(MULLconst [c] x) && c%3 == 0 && isPowerOfTwo32(c/3) => (SHLLconst [int32(log32(c/3))] (LEAL2 <v.Type> x x))
+(MULLconst [c] x) && c%5 == 0 && isPowerOfTwo32(c/5) => (SHLLconst [int32(log32(c/5))] (LEAL4 <v.Type> x x))
+(MULLconst [c] x) && c%9 == 0 && isPowerOfTwo32(c/9) => (SHLLconst [int32(log32(c/9))] (LEAL8 <v.Type> x x))
// combine add/shift into LEAL
-(ADDL x (SHLLconst [3] y)) -> (LEAL8 x y)
-(ADDL x (SHLLconst [2] y)) -> (LEAL4 x y)
-(ADDL x (SHLLconst [1] y)) -> (LEAL2 x y)
-(ADDL x (ADDL y y)) -> (LEAL2 x y)
-(ADDL x (ADDL x y)) -> (LEAL2 y x)
+(ADDL x (SHLLconst [3] y)) => (LEAL8 x y)
+(ADDL x (SHLLconst [2] y)) => (LEAL4 x y)
+(ADDL x (SHLLconst [1] y)) => (LEAL2 x y)
+(ADDL x (ADDL y y)) => (LEAL2 x y)
+(ADDL x (ADDL x y)) => (LEAL2 y x)
// combine ADDL/ADDLconst into LEAL1
-(ADDLconst [c] (ADDL x y)) -> (LEAL1 [c] x y)
-(ADDL (ADDLconst [c] x) y) -> (LEAL1 [c] x y)
+(ADDLconst [c] (ADDL x y)) => (LEAL1 [c] x y)
+(ADDL (ADDLconst [c] x) y) => (LEAL1 [c] x y)
// fold ADDL into LEAL
-(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
-(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
-(LEAL [c] {s} (ADDL x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)
-(ADDL x (LEAL [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)
+(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEAL [c+d] {s} x)
+(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(int64(c)+int64(d)) => (LEAL [c+d] {s} x)
+(LEAL [c] {s} (ADDL x y)) && x.Op != OpSB && y.Op != OpSB => (LEAL1 [c] {s} x y)
+(ADDL x (LEAL [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEAL1 [c] {s} x y)
// fold ADDLconst into LEALx
-(ADDLconst [c] (LEAL1 [d] {s} x y)) && is32Bit(c+d) -> (LEAL1 [c+d] {s} x y)
-(ADDLconst [c] (LEAL2 [d] {s} x y)) && is32Bit(c+d) -> (LEAL2 [c+d] {s} x y)
-(ADDLconst [c] (LEAL4 [d] {s} x y)) && is32Bit(c+d) -> (LEAL4 [c+d] {s} x y)
-(ADDLconst [c] (LEAL8 [d] {s} x y)) && is32Bit(c+d) -> (LEAL8 [c+d] {s} x y)
-(LEAL1 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL1 [c+d] {s} x y)
-(LEAL2 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL2 [c+d] {s} x y)
-(LEAL2 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAL2 [c+2*d] {s} x y)
-(LEAL4 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL4 [c+d] {s} x y)
-(LEAL4 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAL4 [c+4*d] {s} x y)
-(LEAL8 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL8 [c+d] {s} x y)
-(LEAL8 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAL8 [c+8*d] {s} x y)
+(ADDLconst [c] (LEAL1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL1 [c+d] {s} x y)
+(ADDLconst [c] (LEAL2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL2 [c+d] {s} x y)
+(ADDLconst [c] (LEAL4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL4 [c+d] {s} x y)
+(ADDLconst [c] (LEAL8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL8 [c+d] {s} x y)
+(LEAL1 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL1 [c+d] {s} x y)
+(LEAL2 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL2 [c+d] {s} x y)
+(LEAL2 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEAL2 [c+2*d] {s} x y)
+(LEAL4 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL4 [c+d] {s} x y)
+(LEAL4 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEAL4 [c+4*d] {s} x y)
+(LEAL8 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL8 [c+d] {s} x y)
+(LEAL8 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEAL8 [c+8*d] {s} x y)
// fold shifts into LEALx
-(LEAL1 [c] {s} x (SHLLconst [1] y)) -> (LEAL2 [c] {s} x y)
-(LEAL1 [c] {s} x (SHLLconst [2] y)) -> (LEAL4 [c] {s} x y)
-(LEAL1 [c] {s} x (SHLLconst [3] y)) -> (LEAL8 [c] {s} x y)
-(LEAL2 [c] {s} x (SHLLconst [1] y)) -> (LEAL4 [c] {s} x y)
-(LEAL2 [c] {s} x (SHLLconst [2] y)) -> (LEAL8 [c] {s} x y)
-(LEAL4 [c] {s} x (SHLLconst [1] y)) -> (LEAL8 [c] {s} x y)
+(LEAL1 [c] {s} x (SHLLconst [1] y)) => (LEAL2 [c] {s} x y)
+(LEAL1 [c] {s} x (SHLLconst [2] y)) => (LEAL4 [c] {s} x y)
+(LEAL1 [c] {s} x (SHLLconst [3] y)) => (LEAL8 [c] {s} x y)
+(LEAL2 [c] {s} x (SHLLconst [1] y)) => (LEAL4 [c] {s} x y)
+(LEAL2 [c] {s} x (SHLLconst [2] y)) => (LEAL8 [c] {s} x y)
+(LEAL4 [c] {s} x (SHLLconst [1] y)) => (LEAL8 [c] {s} x y)
// reverse ordering of compare instruction
-(SETL (InvertFlags x)) -> (SETG x)
-(SETG (InvertFlags x)) -> (SETL x)
-(SETB (InvertFlags x)) -> (SETA x)
-(SETA (InvertFlags x)) -> (SETB x)
-(SETLE (InvertFlags x)) -> (SETGE x)
-(SETGE (InvertFlags x)) -> (SETLE x)
-(SETBE (InvertFlags x)) -> (SETAE x)
-(SETAE (InvertFlags x)) -> (SETBE x)
-(SETEQ (InvertFlags x)) -> (SETEQ x)
-(SETNE (InvertFlags x)) -> (SETNE x)
+(SETL (InvertFlags x)) => (SETG x)
+(SETG (InvertFlags x)) => (SETL x)
+(SETB (InvertFlags x)) => (SETA x)
+(SETA (InvertFlags x)) => (SETB x)
+(SETLE (InvertFlags x)) => (SETGE x)
+(SETGE (InvertFlags x)) => (SETLE x)
+(SETBE (InvertFlags x)) => (SETAE x)
+(SETAE (InvertFlags x)) => (SETBE x)
+(SETEQ (InvertFlags x)) => (SETEQ x)
+(SETNE (InvertFlags x)) => (SETNE x)
// sign extended loads
// Note: The combined instruction must end up in the same block
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values. See test/atomicload.go.
-(MOVBLSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
-(MOVBLZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
-(MOVWLSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
-(MOVWLZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+(MOVBLSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
+(MOVBLZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVWLSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
+(MOVWLZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
-(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBLZX x)
-(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWLZX x)
-(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
-(MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBLSX x)
-(MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWLSX x)
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBLZX x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWLZX x)
+(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBLSX x)
+(MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWLSX x)
// Fold extensions and ANDs together.
-(MOVBLZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x)
-(MOVWLZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x)
-(MOVBLSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x)
-(MOVWLSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)
+(MOVBLZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x)
+(MOVWLZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x)
+(MOVBLSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x)
+(MOVWLSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x)
// Don't extend before storing
-(MOVWstore [off] {sym} ptr (MOVWL(S|Z)X x) mem) -> (MOVWstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVBL(S|Z)X x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWL(S|Z)X x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBL(S|Z)X x) mem) => (MOVBstore [off] {sym} ptr x mem)
// fold constants into memory operations
// Note that this is not always a good idea because if not all the uses of
// the ADDLconst get eliminated, we still have to compute the ADDLconst and we now
// have potentially two live values (ptr and (ADDLconst [off] ptr)) instead of one.
// Nevertheless, let's do it!
-(MOV(L|W|B|SS|SD)load [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOV(L|W|B|SS|SD)load [off1+off2] {sym} ptr mem)
-(MOV(L|W|B|SS|SD)store [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOV(L|W|B|SS|SD)store [off1+off2] {sym} ptr val mem)
+(MOV(L|W|B|SS|SD)load [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(L|W|B|SS|SD)load [off1+off2] {sym} ptr mem)
+(MOV(L|W|B|SS|SD)store [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(L|W|B|SS|SD)store [off1+off2] {sym} ptr val mem)
-((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(off1+off2) ->
+((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
-((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(off1+off2) ->
+((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
-((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(off1+off2) ->
+((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
-((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDLconst [off2] base) val mem) && is32Bit(off1+off2) ->
+((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDLconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
-((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
- ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) && valoff1.canAdd32(off2) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
// Fold constants into stores.
-(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
- (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
-(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
- (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
-(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
- (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
+(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(int64(off)) =>
+ (MOVLstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(int64(off)) =>
+ (MOVWstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(int64(off)) =>
+ (MOVBstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
// Fold address offsets into constant stores.
-(MOV(L|W|B)storeconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
- (MOV(L|W|B)storeconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOV(L|W|B)storeconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
+ (MOV(L|W|B)storeconst [sc.addOffset32(off)] {s} ptr mem)
// We need to fold LEAL into the MOVx ops so that the live variable analysis knows
// what variables are being read/written by the ops.
// Keeping the LEAL as a separate instruction gives us a register for the
// address computation; it also allows the LEAL to be CSEd (which is good
// because, in PIC mode, it compiles to a thunk call).
-(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
-(MOV(L|W|B|SS|SD)store [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOV(L|W|B|SS|SD)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOV(L|W|B|SS|SD)store [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOV(L|W|B|SS|SD)store [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
-(MOV(L|W|B)storeconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
- && (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
- (MOV(L|W|B)storeconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOV(L|W|B)storeconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOV(L|W|B)storeconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- ((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- ((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
- && ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ && valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
// Merge load/store to op
-((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem)
-((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
-((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
-(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
-(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) ->
+((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
+(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
- && y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(c,off) ->
- ((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ && y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off)) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff32(c,off)] {sym} ptr mem)
// fold LEALs together
-(LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
+(LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL [off1+off2] {mergeSymTyped(sym1,sym2)} x)
// LEAL into LEAL1
-(LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
- (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// LEAL1 into LEAL
-(LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// LEAL into LEAL[248]
-(LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
- (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
-(LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
- (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
-(LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
- (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
+(LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
+(LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// LEAL[248] into LEAL
-(LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
-(LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
-(LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
+(LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
+(LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// LEAL[1248] into LEAL[1248]. Only some such merges are possible.
-(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
-(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
-(LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(off1+2*off2) ->
+(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL2 [off1+off2] {mergeSymTyped(sym1, sym2)} x y)
+(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL2 [off1+off2] {mergeSymTyped(sym1, sym2)} y x)
+(LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+2*int64(off2)) =>
(LEAL4 [off1+2*off2] {sym} x y)
-(LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(off1+4*off2) ->
+(LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+4*int64(off2)) =>
(LEAL8 [off1+4*off2] {sym} x y)
// Absorb InvertFlags into branches.
-(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
-(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
-(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
-(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
-(ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
-(UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
-(ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
-(UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
-(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
-(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
// Constant comparisons.
-(CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
-(CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
-(CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
-(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
-(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
-(CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
-(CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
-(CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
-(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
-(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
-(CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
-(CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
-(CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
-(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
-(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT)
+
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT)
+
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT)
// Other known comparisons.
(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT_ULT)
if v_1.Op != Op386MOVLconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
f := v_2
v.reset(Op386ADCLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, f)
return true
}
if v_1.Op != Op386MOVLconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386ADDLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != Op386SHLLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != Op386SHRLconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt32(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 32-c) {
continue
}
v.reset(Op386ROLLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: c < 16 && d == 16-c && t.Size() == 2
- // result: (ROLWconst x [c])
+ // cond: c < 16 && d == int16(16-c) && t.Size() == 2
+ // result: (ROLWconst x [int16(c)])
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != Op386SHLLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != Op386SHRWconst {
continue
}
- d := v_1.AuxInt
- if x != v_1.Args[0] || !(c < 16 && d == 16-c && t.Size() == 2) {
+ d := auxIntToInt16(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) {
continue
}
v.reset(Op386ROLWconst)
- v.AuxInt = c
+ v.AuxInt = int16ToAuxInt(int16(c))
v.AddArg(x)
return true
}
break
}
// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: c < 8 && d == 8-c && t.Size() == 1
- // result: (ROLBconst x [c])
+ // cond: c < 8 && d == int8(8-c) && t.Size() == 1
+ // result: (ROLBconst x [int8(c)])
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != Op386SHLLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != Op386SHRBconst {
continue
}
- d := v_1.AuxInt
- if x != v_1.Args[0] || !(c < 8 && d == 8-c && t.Size() == 1) {
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) {
continue
}
v.reset(Op386ROLBconst)
- v.AuxInt = c
+ v.AuxInt = int8ToAuxInt(int8(c))
v.AddArg(x)
return true
}
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 3 {
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 3 {
continue
}
y := v_1.Args[0]
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 {
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 {
continue
}
y := v_1.Args[0]
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 {
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
continue
}
y := v_1.Args[0]
if v_0.Op != Op386ADDLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
y := v_1
v.reset(Op386LEAL1)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
return true
}
if v_1.Op != Op386LEAL {
continue
}
- c := v_1.AuxInt
- s := v_1.Aux
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
y := v_1.Args[0]
if !(x.Op != OpSB && y.Op != OpSB) {
continue
}
v.reset(Op386LEAL1)
- v.AuxInt = c
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
if l.Op != Op386MOVLload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(Op386ADDLload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
if v_1.Op != Op386MOVLconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386ADDLconstcarry)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (ADDLconst [c] (ADDL x y))
// result: (LEAL1 [c] x y)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386ADDL {
break
}
y := v_0.Args[1]
x := v_0.Args[0]
v.reset(Op386LEAL1)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] (LEAL [d] {s} x))
- // cond: is32Bit(c+d)
+ // cond: is32Bit(int64(c)+int64(d))
// result: (LEAL [c+d] {s} x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386LEAL {
break
}
- d := v_0.AuxInt
- s := v_0.Aux
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
x := v_0.Args[0]
- if !(is32Bit(c + d)) {
+ if !(is32Bit(int64(c) + int64(d))) {
break
}
v.reset(Op386LEAL)
- v.AuxInt = c + d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
v.AddArg(x)
return true
}
// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
- // cond: is32Bit(c+d)
+ // cond: is32Bit(int64(c)+int64(d))
// result: (LEAL1 [c+d] {s} x y)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386LEAL1 {
break
}
- d := v_0.AuxInt
- s := v_0.Aux
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
y := v_0.Args[1]
x := v_0.Args[0]
- if !(is32Bit(c + d)) {
+ if !(is32Bit(int64(c) + int64(d))) {
break
}
v.reset(Op386LEAL1)
- v.AuxInt = c + d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
- // cond: is32Bit(c+d)
+ // cond: is32Bit(int64(c)+int64(d))
// result: (LEAL2 [c+d] {s} x y)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386LEAL2 {
break
}
- d := v_0.AuxInt
- s := v_0.Aux
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
y := v_0.Args[1]
x := v_0.Args[0]
- if !(is32Bit(c + d)) {
+ if !(is32Bit(int64(c) + int64(d))) {
break
}
v.reset(Op386LEAL2)
- v.AuxInt = c + d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
- // cond: is32Bit(c+d)
+ // cond: is32Bit(int64(c)+int64(d))
// result: (LEAL4 [c+d] {s} x y)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386LEAL4 {
break
}
- d := v_0.AuxInt
- s := v_0.Aux
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
y := v_0.Args[1]
x := v_0.Args[0]
- if !(is32Bit(c + d)) {
+ if !(is32Bit(int64(c) + int64(d))) {
break
}
v.reset(Op386LEAL4)
- v.AuxInt = c + d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
- // cond: is32Bit(c+d)
+ // cond: is32Bit(int64(c)+int64(d))
// result: (LEAL8 [c+d] {s} x y)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386LEAL8 {
break
}
- d := v_0.AuxInt
- s := v_0.Aux
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
y := v_0.Args[1]
x := v_0.Args[0]
- if !(is32Bit(c + d)) {
+ if !(is32Bit(int64(c) + int64(d))) {
break
}
v.reset(Op386LEAL8)
- v.AuxInt = c + d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
b := v.Block
config := b.Func.Config
// match: (ADDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2)
- // result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (ADDLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
for {
- valoff1 := v.AuxInt
- sym := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2)) {
+ if !(valoff1.canAdd32(off2)) {
break
}
v.reset(Op386ADDLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(base, mem)
return true
}
// match: (ADDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLconstmodify [valoff1.addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386ADDLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (ADDLload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (ADDLload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386ADDLload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (ADDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386ADDLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (ADDLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (ADDLmodify [off1+off2] {sym} base val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386ADDLmodify)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(base, val, mem)
return true
}
// match: (ADDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386ADDLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
if l.Op != Op386MOVSDload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
continue
}
v.reset(Op386ADDSDload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (ADDSDload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (ADDSDload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386ADDSDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (ADDSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386ADDSDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
if l.Op != Op386MOVSSload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
continue
}
v.reset(Op386ADDSSload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (ADDSSload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (ADDSSload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386ADDSSload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (ADDSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386ADDSSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
if v_1.Op != Op386MOVLconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386ANDLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
if l.Op != Op386MOVLload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(Op386ANDLload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (ANDLconst [c] (ANDLconst [d] x))
// result: (ANDLconst [c & d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386ANDLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(Op386ANDLconst)
- v.AuxInt = c & d
+ v.AuxInt = int32ToAuxInt(c & d)
v.AddArg(x)
return true
}
b := v.Block
config := b.Func.Config
// match: (ANDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2)
- // result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (ANDLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
for {
- valoff1 := v.AuxInt
- sym := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2)) {
+ if !(valoff1.canAdd32(off2)) {
break
}
v.reset(Op386ANDLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(base, mem)
return true
}
// match: (ANDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLconstmodify [valoff1.addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386ANDLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (ANDLload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (ANDLload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386ANDLload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (ANDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386ANDLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (ANDLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (ANDLmodify [off1+off2] {sym} base val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386ANDLmodify)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(base, val, mem)
return true
}
// match: (ANDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386ANDLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
v_0 := v.Args[0]
b := v.Block
// match: (CMPB x (MOVLconst [c]))
- // result: (CMPBconst x [int64(int8(c))])
+ // result: (CMPBconst x [int8(c)])
for {
x := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386CMPBconst)
- v.AuxInt = int64(int8(c))
+ v.AuxInt = int8ToAuxInt(int8(c))
v.AddArg(x)
return true
}
// match: (CMPB (MOVLconst [c]) x)
- // result: (InvertFlags (CMPBconst x [int64(int8(c))]))
+ // result: (InvertFlags (CMPBconst x [int8(c)]))
for {
if v_0.Op != Op386MOVLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_1
v.reset(Op386InvertFlags)
v0 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
- v0.AuxInt = int64(int8(c))
+ v0.AuxInt = int8ToAuxInt(int8(c))
v0.AddArg(x)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)==int8(y)
+ // cond: int8(x)==y
// result: (FlagEQ)
for {
- y := v.AuxInt
+ y := auxIntToInt8(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int8(x) == int8(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) == y) {
break
}
v.reset(Op386FlagEQ)
return true
}
// match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)<int8(y) && uint8(x)<uint8(y)
+ // cond: int8(x)<y && uint8(x)<uint8(y)
// result: (FlagLT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt8(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) < uint8(y)) {
break
}
v.reset(Op386FlagLT_ULT)
return true
}
// match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)<int8(y) && uint8(x)>uint8(y)
+ // cond: int8(x)<y && uint8(x)>uint8(y)
// result: (FlagLT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt8(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) > uint8(y)) {
break
}
v.reset(Op386FlagLT_UGT)
return true
}
// match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)>int8(y) && uint8(x)<uint8(y)
+ // cond: int8(x)>y && uint8(x)<uint8(y)
// result: (FlagGT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt8(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) < uint8(y)) {
break
}
v.reset(Op386FlagGT_ULT)
return true
}
// match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)>int8(y) && uint8(x)>uint8(y)
+ // cond: int8(x)>y && uint8(x)>uint8(y)
// result: (FlagGT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt8(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) > uint8(y)) {
break
}
v.reset(Op386FlagGT_UGT)
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386CMPLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != Op386MOVLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_1
v.reset(Op386InvertFlags)
v0 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int32ToAuxInt(c)
v0.AddArg(x)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)==int32(y)
+ // cond: x==y
// result: (FlagEQ)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int32(x) == int32(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x == y) {
break
}
v.reset(Op386FlagEQ)
return true
}
// match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
+ // cond: x<y && uint32(x)<uint32(y)
// result: (FlagLT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) < uint32(y)) {
break
}
v.reset(Op386FlagLT_ULT)
return true
}
// match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
+ // cond: x<y && uint32(x)>uint32(y)
// result: (FlagLT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) > uint32(y)) {
break
}
v.reset(Op386FlagLT_UGT)
return true
}
// match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
+ // cond: x>y && uint32(x)<uint32(y)
// result: (FlagGT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) < uint32(y)) {
break
}
v.reset(Op386FlagGT_ULT)
return true
}
// match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
+ // cond: x>y && uint32(x)>uint32(y)
// result: (FlagGT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) > uint32(y)) {
break
}
v.reset(Op386FlagGT_UGT)
v_0 := v.Args[0]
b := v.Block
// match: (CMPW x (MOVLconst [c]))
- // result: (CMPWconst x [int64(int16(c))])
+ // result: (CMPWconst x [int16(c)])
for {
x := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386CMPWconst)
- v.AuxInt = int64(int16(c))
+ v.AuxInt = int16ToAuxInt(int16(c))
v.AddArg(x)
return true
}
// match: (CMPW (MOVLconst [c]) x)
- // result: (InvertFlags (CMPWconst x [int64(int16(c))]))
+ // result: (InvertFlags (CMPWconst x [int16(c)]))
for {
if v_0.Op != Op386MOVLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_1
v.reset(Op386InvertFlags)
v0 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
- v0.AuxInt = int64(int16(c))
+ v0.AuxInt = int16ToAuxInt(int16(c))
v0.AddArg(x)
v.AddArg(v0)
return true
v_0 := v.Args[0]
b := v.Block
// match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)==int16(y)
+ // cond: int16(x)==y
// result: (FlagEQ)
for {
- y := v.AuxInt
+ y := auxIntToInt16(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int16(x) == int16(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) == y) {
break
}
v.reset(Op386FlagEQ)
return true
}
// match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)<int16(y) && uint16(x)<uint16(y)
+ // cond: int16(x)<y && uint16(x)<uint16(y)
// result: (FlagLT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt16(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) < uint16(y)) {
break
}
v.reset(Op386FlagLT_ULT)
return true
}
// match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)<int16(y) && uint16(x)>uint16(y)
+ // cond: int16(x)<y && uint16(x)>uint16(y)
// result: (FlagLT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt16(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) > uint16(y)) {
break
}
v.reset(Op386FlagLT_UGT)
return true
}
// match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)>int16(y) && uint16(x)<uint16(y)
+ // cond: int16(x)>y && uint16(x)<uint16(y)
// result: (FlagGT_ULT)
for {
- y := v.AuxInt
+ y := auxIntToInt16(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) < uint16(y)) {
break
}
v.reset(Op386FlagGT_ULT)
return true
}
// match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)>int16(y) && uint16(x)>uint16(y)
+ // cond: int16(x)>y && uint16(x)>uint16(y)
// result: (FlagGT_UGT)
for {
- y := v.AuxInt
+ y := auxIntToInt16(v.AuxInt)
if v_0.Op != Op386MOVLconst {
break
}
- x := v_0.AuxInt
- if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) > uint16(y)) {
break
}
v.reset(Op386FlagGT_UGT)
if l.Op != Op386MOVSDload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
break
}
v.reset(Op386DIVSDload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (DIVSDload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (DIVSDload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386DIVSDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (DIVSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (DIVSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386DIVSDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
if l.Op != Op386MOVSSload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
break
}
v.reset(Op386DIVSSload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (DIVSSload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (DIVSSload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386DIVSSload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (DIVSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (DIVSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386DIVSSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
func rewriteValue386_Op386LEAL(v *Value) bool {
v_0 := v.Args[0]
// match: (LEAL [c] {s} (ADDLconst [d] x))
- // cond: is32Bit(c+d)
+ // cond: is32Bit(int64(c)+int64(d))
// result: (LEAL [c+d] {s} x)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(is32Bit(c + d)) {
+ if !(is32Bit(int64(c) + int64(d))) {
break
}
v.reset(Op386LEAL)
- v.AuxInt = c + d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
v.AddArg(x)
return true
}
// cond: x.Op != OpSB && y.Op != OpSB
// result: (LEAL1 [c] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
if v_0.Op != Op386ADDL {
break
}
continue
}
v.reset(Op386LEAL1)
- v.AuxInt = c
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
break
}
// match: (LEAL [off1] {sym1} (LEAL [off2] {sym2} x))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL [off1+off2] {mergeSymTyped(sym1,sym2)} x)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
x := v_0.Args[0]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(Op386LEAL)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg(x)
return true
}
// match: (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL1 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
y := v_0.Args[1]
x := v_0.Args[0]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(Op386LEAL1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL2 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
y := v_0.Args[1]
x := v_0.Args[0]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(Op386LEAL2)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL4 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
y := v_0.Args[1]
x := v_0.Args[0]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(Op386LEAL4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL8 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
y := v_0.Args[1]
x := v_0.Args[0]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(Op386LEAL8)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
- // cond: is32Bit(c+d) && x.Op != OpSB
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL1 [c+d] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != Op386ADDLconst {
continue
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
y := v_1
- if !(is32Bit(c+d) && x.Op != OpSB) {
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
continue
}
v.reset(Op386LEAL1)
- v.AuxInt = c + d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (LEAL1 [c] {s} x (SHLLconst [1] y))
// result: (LEAL2 [c] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 {
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
continue
}
y := v_1.Args[0]
v.reset(Op386LEAL2)
- v.AuxInt = c
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
// result: (LEAL4 [c] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 {
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 {
continue
}
y := v_1.Args[0]
v.reset(Op386LEAL4)
- v.AuxInt = c
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
// result: (LEAL8 [c] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 3 {
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 3 {
continue
}
y := v_1.Args[0]
v.reset(Op386LEAL8)
- v.AuxInt = c
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
break
}
// match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != Op386LEAL {
continue
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
x := v_0.Args[0]
y := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
continue
}
v.reset(Op386LEAL1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
break
}
// match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSymTyped(sym1, sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1.Op != Op386LEAL1 {
continue
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
y := v_1.Args[1]
- if y != v_1.Args[0] || !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
continue
}
v.reset(Op386LEAL2)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
break
}
// match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSymTyped(sym1, sym2)} y x)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1.Op != Op386LEAL1 {
continue
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
v_1_1 := v_1.Args[1]
continue
}
y := v_1_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
continue
}
v.reset(Op386LEAL2)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(y, x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
- // cond: is32Bit(c+d) && x.Op != OpSB
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL2 [c+d] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
y := v_1
- if !(is32Bit(c+d) && x.Op != OpSB) {
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
break
}
v.reset(Op386LEAL2)
- v.AuxInt = c + d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
- // cond: is32Bit(c+2*d) && y.Op != OpSB
+ // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
// result: (LEAL2 [c+2*d] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
x := v_0
if v_1.Op != Op386ADDLconst {
break
}
- d := v_1.AuxInt
+ d := auxIntToInt32(v_1.AuxInt)
y := v_1.Args[0]
- if !(is32Bit(c+2*d) && y.Op != OpSB) {
+ if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
break
}
v.reset(Op386LEAL2)
- v.AuxInt = c + 2*d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + 2*d)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
// result: (LEAL4 [c] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
x := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 {
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
break
}
y := v_1.Args[0]
v.reset(Op386LEAL4)
- v.AuxInt = c
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
// result: (LEAL8 [c] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
x := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 {
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 {
break
}
y := v_1.Args[0]
v.reset(Op386LEAL8)
- v.AuxInt = c
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
x := v_0.Args[0]
y := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(Op386LEAL2)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y))
- // cond: is32Bit(off1+2*off2)
+ // cond: is32Bit(int64(off1)+2*int64(off2))
// result: (LEAL4 [off1+2*off2] {sym} x y)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if v_1.Op != Op386LEAL1 {
break
}
- off2 := v_1.AuxInt
- if v_1.Aux != nil {
+ off2 := auxIntToInt32(v_1.AuxInt)
+ if auxToSym(v_1.Aux) != nil {
break
}
y := v_1.Args[1]
- if y != v_1.Args[0] || !(is32Bit(off1 + 2*off2)) {
+ if y != v_1.Args[0] || !(is32Bit(int64(off1) + 2*int64(off2))) {
break
}
v.reset(Op386LEAL4)
- v.AuxInt = off1 + 2*off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + 2*off2)
+ v.Aux = symToAux(sym)
v.AddArg2(x, y)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
- // cond: is32Bit(c+d) && x.Op != OpSB
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL4 [c+d] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
y := v_1
- if !(is32Bit(c+d) && x.Op != OpSB) {
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
break
}
v.reset(Op386LEAL4)
- v.AuxInt = c + d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
- // cond: is32Bit(c+4*d) && y.Op != OpSB
+ // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
// result: (LEAL4 [c+4*d] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
x := v_0
if v_1.Op != Op386ADDLconst {
break
}
- d := v_1.AuxInt
+ d := auxIntToInt32(v_1.AuxInt)
y := v_1.Args[0]
- if !(is32Bit(c+4*d) && y.Op != OpSB) {
+ if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
break
}
v.reset(Op386LEAL4)
- v.AuxInt = c + 4*d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + 4*d)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
// result: (LEAL8 [c] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
x := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 {
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
break
}
y := v_1.Args[0]
v.reset(Op386LEAL8)
- v.AuxInt = c
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
x := v_0.Args[0]
y := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(Op386LEAL4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y))
- // cond: is32Bit(off1+4*off2)
+ // cond: is32Bit(int64(off1)+4*int64(off2))
// result: (LEAL8 [off1+4*off2] {sym} x y)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if v_1.Op != Op386LEAL1 {
break
}
- off2 := v_1.AuxInt
- if v_1.Aux != nil {
+ off2 := auxIntToInt32(v_1.AuxInt)
+ if auxToSym(v_1.Aux) != nil {
break
}
y := v_1.Args[1]
- if y != v_1.Args[0] || !(is32Bit(off1 + 4*off2)) {
+ if y != v_1.Args[0] || !(is32Bit(int64(off1) + 4*int64(off2))) {
break
}
v.reset(Op386LEAL8)
- v.AuxInt = off1 + 4*off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + 4*off2)
+ v.Aux = symToAux(sym)
v.AddArg2(x, y)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
- // cond: is32Bit(c+d) && x.Op != OpSB
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL8 [c+d] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
y := v_1
- if !(is32Bit(c+d) && x.Op != OpSB) {
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
break
}
v.reset(Op386LEAL8)
- v.AuxInt = c + d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
- // cond: is32Bit(c+8*d) && y.Op != OpSB
+ // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
// result: (LEAL8 [c+8*d] {s} x y)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
x := v_0
if v_1.Op != Op386ADDLconst {
break
}
- d := v_1.AuxInt
+ d := auxIntToInt32(v_1.AuxInt)
y := v_1.Args[0]
- if !(is32Bit(c+8*d) && y.Op != OpSB) {
+ if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
break
}
v.reset(Op386LEAL8)
- v.AuxInt = c + 8*d
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c + 8*d)
+ v.Aux = symToAux(s)
v.AddArg2(x, y)
return true
}
// match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
x := v_0.Args[0]
y := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(Op386LEAL8)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
if x.Op != Op386MOVBload {
break
}
- off := x.AuxInt
- sym := x.Aux
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
mem := x.Args[1]
ptr := x.Args[0]
if !(x.Uses == 1 && clobber(x)) {
b = x.Block
v0 := b.NewValue0(x.Pos, Op386MOVBLSXload, v.Type)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
if v_0.Op != Op386ANDLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if !(c&0x80 == 0) {
break
}
v.reset(Op386ANDLconst)
- v.AuxInt = c & 0x7f
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
v.AddArg(x)
return true
}
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVBLSX x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != Op386MOVBstore {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
return true
}
// match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBLSXload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVBLSXload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
if x.Op != Op386MOVBload {
break
}
- off := x.AuxInt
- sym := x.Aux
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
mem := x.Args[1]
ptr := x.Args[0]
if !(x.Uses == 1 && clobber(x)) {
b = x.Block
v0 := b.NewValue0(x.Pos, Op386MOVBload, v.Type)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
if v_0.Op != Op386ANDLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(Op386ANDLconst)
- v.AuxInt = c & 0xff
+ v.AuxInt = int32ToAuxInt(c & 0xff)
v.AddArg(x)
return true
}
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVBLZX x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != Op386MOVBstore {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
return true
}
// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != Op386MOVBLSX {
break
x := v_1.Args[0]
mem := v_2
v.reset(Op386MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != Op386MOVBLZX {
break
x := v_1.Args[0]
mem := v_2
v.reset(Op386MOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
- // cond: validOff(off)
- // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
+ // cond: validOff(int64(off))
+ // result: (MOVBstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
mem := v_2
- if !(validOff(off)) {
+ if !(validOff(int64(off))) {
break
}
v.reset(Op386MOVBstoreconst)
- v.AuxInt = makeValAndOff(int64(int8(c)), off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
- // cond: ValAndOff(sc).canAdd(off)
- // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
for {
- sc := v.AuxInt
- s := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off := v_0.AuxInt
+ off := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(ValAndOff(sc).canAdd(off)) {
+ if !(sc.canAdd32(off)) {
break
}
v.reset(Op386MOVBstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = s
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
- // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- sc := v.AuxInt
- sym1 := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off := v_0.AuxInt
- sym2 := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVBstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != Op386MOVLstore {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
return true
}
// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVLload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MOVLload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVLstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MOVLstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
- // cond: validOff(off)
- // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
+ // cond: validOff(int64(off))
+ // result: (MOVLstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
mem := v_2
- if !(validOff(off)) {
+ if !(validOff(int64(off))) {
break
}
v.reset(Op386MOVLstoreconst)
- v.AuxInt = makeValAndOff(int64(int32(c)), off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVLstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
// cond: y.Uses==1 && clobber(y)
// result: (ADDLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
- if y.Op != Op386ADDLload || y.AuxInt != off || y.Aux != sym {
+ if y.Op != Op386ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
break
}
mem := y.Args[2]
break
}
v.reset(Op386ADDLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && clobber(y)
// result: (ANDLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
- if y.Op != Op386ANDLload || y.AuxInt != off || y.Aux != sym {
+ if y.Op != Op386ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
break
}
mem := y.Args[2]
break
}
v.reset(Op386ANDLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && clobber(y)
// result: (ORLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
- if y.Op != Op386ORLload || y.AuxInt != off || y.Aux != sym {
+ if y.Op != Op386ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
break
}
mem := y.Args[2]
break
}
v.reset(Op386ORLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && clobber(y)
// result: (XORLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
- if y.Op != Op386XORLload || y.AuxInt != off || y.Aux != sym {
+ if y.Op != Op386XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
break
}
mem := y.Args[2]
break
}
v.reset(Op386XORLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (ADDLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != Op386ADDL {
y_1 := y.Args[1]
for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
l := y_0
- if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
continue
}
mem := l.Args[1]
continue
}
v.reset(Op386ADDLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (SUBLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != Op386SUBL {
}
x := y.Args[1]
l := y.Args[0]
- if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
break
}
v.reset(Op386SUBLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (ANDLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != Op386ANDL {
y_1 := y.Args[1]
for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
l := y_0
- if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
continue
}
mem := l.Args[1]
continue
}
v.reset(Op386ANDLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (ORLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != Op386ORL {
y_1 := y.Args[1]
for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
l := y_0
- if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
continue
}
mem := l.Args[1]
continue
}
v.reset(Op386ORLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (XORLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != Op386XORL {
y_1 := y.Args[1]
for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
l := y_0
- if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
continue
}
mem := l.Args[1]
continue
}
v.reset(Op386XORLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
break
}
// match: (MOVLstore {sym} [off] ptr y:(ADDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(c,off)
- // result: (ADDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off))
+ // result: (ADDLconstmodify [makeValAndOff32(c,off)] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != Op386ADDLconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt32(y.AuxInt)
l := y.Args[0]
- if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
- if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(c, off)) {
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(int64(c), int64(off))) {
break
}
v.reset(Op386ADDLconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ANDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(c,off)
- // result: (ANDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off))
+ // result: (ANDLconstmodify [makeValAndOff32(c,off)] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != Op386ANDLconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt32(y.AuxInt)
l := y.Args[0]
- if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
- if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(c, off)) {
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(int64(c), int64(off))) {
break
}
v.reset(Op386ANDLconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(c,off)
- // result: (ORLconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off))
+ // result: (ORLconstmodify [makeValAndOff32(c,off)] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != Op386ORLconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt32(y.AuxInt)
l := y.Args[0]
- if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
- if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(c, off)) {
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(int64(c), int64(off))) {
break
}
v.reset(Op386ORLconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(XORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(c,off)
- // result: (XORLconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off))
+ // result: (XORLconstmodify [makeValAndOff32(c,off)] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != Op386XORLconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt32(y.AuxInt)
l := y.Args[0]
- if l.Op != Op386MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
- if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(c, off)) {
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(int64(c), int64(off))) {
break
}
v.reset(Op386XORLconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
- // cond: ValAndOff(sc).canAdd(off)
- // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem)
for {
- sc := v.AuxInt
- s := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off := v_0.AuxInt
+ off := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(ValAndOff(sc).canAdd(off)) {
+ if !(sc.canAdd32(off)) {
break
}
v.reset(Op386MOVLstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = s
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
- // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- sc := v.AuxInt
- sym1 := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off := v_0.AuxInt
- sym2 := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVLstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVSDload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MOVSDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVSDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MOVSDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVSDstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVSSload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MOVSSload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSSload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVSSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MOVSSstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSSstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVSSstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
if x.Op != Op386MOVWload {
break
}
- off := x.AuxInt
- sym := x.Aux
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
mem := x.Args[1]
ptr := x.Args[0]
if !(x.Uses == 1 && clobber(x)) {
b = x.Block
v0 := b.NewValue0(x.Pos, Op386MOVWLSXload, v.Type)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
if v_0.Op != Op386ANDLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if !(c&0x8000 == 0) {
break
}
v.reset(Op386ANDLconst)
- v.AuxInt = c & 0x7fff
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
v.AddArg(x)
return true
}
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVWLSX x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != Op386MOVWstore {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
return true
}
// match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWLSXload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVWLSXload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
if x.Op != Op386MOVWload {
break
}
- off := x.AuxInt
- sym := x.Aux
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
mem := x.Args[1]
ptr := x.Args[0]
if !(x.Uses == 1 && clobber(x)) {
b = x.Block
v0 := b.NewValue0(x.Pos, Op386MOVWload, v.Type)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
if v_0.Op != Op386ANDLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(Op386ANDLconst)
- v.AuxInt = c & 0xffff
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
v.AddArg(x)
return true
}
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVWLZX x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != Op386MOVWstore {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
return true
}
// match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVWload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem)
// result: (MOVWstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != Op386MOVWLSX {
break
x := v_1.Args[0]
mem := v_2
v.reset(Op386MOVWstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem)
// result: (MOVWstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != Op386MOVWLZX {
break
x := v_1.Args[0]
mem := v_2
v.reset(Op386MOVWstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVWstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
- // cond: validOff(off)
- // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
+ // cond: validOff(int64(off))
+ // result: (MOVWstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
mem := v_2
- if !(validOff(off)) {
+ if !(validOff(int64(off))) {
break
}
v.reset(Op386MOVWstoreconst)
- v.AuxInt = makeValAndOff(int64(int16(c)), off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
- // cond: ValAndOff(sc).canAdd(off)
- // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
for {
- sc := v.AuxInt
- s := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off := v_0.AuxInt
+ off := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(ValAndOff(sc).canAdd(off)) {
+ if !(sc.canAdd32(off)) {
break
}
v.reset(Op386MOVWstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = s
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
- // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- sc := v.AuxInt
- sym1 := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off := v_0.AuxInt
- sym2 := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MOVWstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
if v_1.Op != Op386MOVLconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386MULLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
if l.Op != Op386MOVLload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(Op386MULLload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
v_0 := v.Args[0]
b := v.Block
// match: (MULLconst [c] (MULLconst [d] x))
- // result: (MULLconst [int64(int32(c * d))] x)
+ // result: (MULLconst [c * d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386MULLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(Op386MULLconst)
- v.AuxInt = int64(int32(c * d))
+ v.AuxInt = int32ToAuxInt(c * d)
v.AddArg(x)
return true
}
// match: (MULLconst [-9] x)
// result: (NEGL (LEAL8 <v.Type> x x))
for {
- if v.AuxInt != -9 {
+ if auxIntToInt32(v.AuxInt) != -9 {
break
}
x := v_0
// match: (MULLconst [-5] x)
// result: (NEGL (LEAL4 <v.Type> x x))
for {
- if v.AuxInt != -5 {
+ if auxIntToInt32(v.AuxInt) != -5 {
break
}
x := v_0
// match: (MULLconst [-3] x)
// result: (NEGL (LEAL2 <v.Type> x x))
for {
- if v.AuxInt != -3 {
+ if auxIntToInt32(v.AuxInt) != -3 {
break
}
x := v_0
// match: (MULLconst [-1] x)
// result: (NEGL x)
for {
- if v.AuxInt != -1 {
+ if auxIntToInt32(v.AuxInt) != -1 {
break
}
x := v_0
// match: (MULLconst [0] _)
// result: (MOVLconst [0])
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
v.reset(Op386MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (MULLconst [1] x)
// result: x
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
x := v_0
// match: (MULLconst [3] x)
// result: (LEAL2 x x)
for {
- if v.AuxInt != 3 {
+ if auxIntToInt32(v.AuxInt) != 3 {
break
}
x := v_0
// match: (MULLconst [5] x)
// result: (LEAL4 x x)
for {
- if v.AuxInt != 5 {
+ if auxIntToInt32(v.AuxInt) != 5 {
break
}
x := v_0
// match: (MULLconst [7] x)
// result: (LEAL2 x (LEAL2 <v.Type> x x))
for {
- if v.AuxInt != 7 {
+ if auxIntToInt32(v.AuxInt) != 7 {
break
}
x := v_0
// match: (MULLconst [9] x)
// result: (LEAL8 x x)
for {
- if v.AuxInt != 9 {
+ if auxIntToInt32(v.AuxInt) != 9 {
break
}
x := v_0
// match: (MULLconst [11] x)
// result: (LEAL2 x (LEAL4 <v.Type> x x))
for {
- if v.AuxInt != 11 {
+ if auxIntToInt32(v.AuxInt) != 11 {
break
}
x := v_0
// match: (MULLconst [13] x)
// result: (LEAL4 x (LEAL2 <v.Type> x x))
for {
- if v.AuxInt != 13 {
+ if auxIntToInt32(v.AuxInt) != 13 {
break
}
x := v_0
// match: (MULLconst [19] x)
// result: (LEAL2 x (LEAL8 <v.Type> x x))
for {
- if v.AuxInt != 19 {
+ if auxIntToInt32(v.AuxInt) != 19 {
break
}
x := v_0
// match: (MULLconst [21] x)
// result: (LEAL4 x (LEAL4 <v.Type> x x))
for {
- if v.AuxInt != 21 {
+ if auxIntToInt32(v.AuxInt) != 21 {
break
}
x := v_0
// match: (MULLconst [25] x)
// result: (LEAL8 x (LEAL2 <v.Type> x x))
for {
- if v.AuxInt != 25 {
+ if auxIntToInt32(v.AuxInt) != 25 {
break
}
x := v_0
// match: (MULLconst [27] x)
// result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
for {
- if v.AuxInt != 27 {
+ if auxIntToInt32(v.AuxInt) != 27 {
break
}
x := v_0
// match: (MULLconst [37] x)
// result: (LEAL4 x (LEAL8 <v.Type> x x))
for {
- if v.AuxInt != 37 {
+ if auxIntToInt32(v.AuxInt) != 37 {
break
}
x := v_0
// match: (MULLconst [41] x)
// result: (LEAL8 x (LEAL4 <v.Type> x x))
for {
- if v.AuxInt != 41 {
+ if auxIntToInt32(v.AuxInt) != 41 {
break
}
x := v_0
// match: (MULLconst [45] x)
// result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
for {
- if v.AuxInt != 45 {
+ if auxIntToInt32(v.AuxInt) != 45 {
break
}
x := v_0
// match: (MULLconst [73] x)
// result: (LEAL8 x (LEAL8 <v.Type> x x))
for {
- if v.AuxInt != 73 {
+ if auxIntToInt32(v.AuxInt) != 73 {
break
}
x := v_0
// match: (MULLconst [81] x)
// result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
for {
- if v.AuxInt != 81 {
+ if auxIntToInt32(v.AuxInt) != 81 {
break
}
x := v_0
return true
}
// match: (MULLconst [c] x)
- // cond: isPowerOfTwo(c+1) && c >= 15
- // result: (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x)
+ // cond: isPowerOfTwo32(c+1) && c >= 15
+ // result: (SUBL (SHLLconst <v.Type> [int32(log32(c+1))] x) x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(isPowerOfTwo(c+1) && c >= 15) {
+ if !(isPowerOfTwo32(c+1) && c >= 15) {
break
}
v.reset(Op386SUBL)
v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
- v0.AuxInt = log2(c + 1)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
v0.AddArg(x)
v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
- // cond: isPowerOfTwo(c-1) && c >= 17
- // result: (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x)
+ // cond: isPowerOfTwo32(c-1) && c >= 17
+ // result: (LEAL1 (SHLLconst <v.Type> [int32(log32(c-1))] x) x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(isPowerOfTwo(c-1) && c >= 17) {
+ if !(isPowerOfTwo32(c-1) && c >= 17) {
break
}
v.reset(Op386LEAL1)
v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
- v0.AuxInt = log2(c - 1)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
v0.AddArg(x)
v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
- // cond: isPowerOfTwo(c-2) && c >= 34
- // result: (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x)
+ // cond: isPowerOfTwo32(c-2) && c >= 34
+ // result: (LEAL2 (SHLLconst <v.Type> [int32(log32(c-2))] x) x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(isPowerOfTwo(c-2) && c >= 34) {
+ if !(isPowerOfTwo32(c-2) && c >= 34) {
break
}
v.reset(Op386LEAL2)
v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
- v0.AuxInt = log2(c - 2)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 2)))
v0.AddArg(x)
v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
- // cond: isPowerOfTwo(c-4) && c >= 68
- // result: (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x)
+ // cond: isPowerOfTwo32(c-4) && c >= 68
+ // result: (LEAL4 (SHLLconst <v.Type> [int32(log32(c-4))] x) x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(isPowerOfTwo(c-4) && c >= 68) {
+ if !(isPowerOfTwo32(c-4) && c >= 68) {
break
}
v.reset(Op386LEAL4)
v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
- v0.AuxInt = log2(c - 4)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 4)))
v0.AddArg(x)
v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
- // cond: isPowerOfTwo(c-8) && c >= 136
- // result: (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x)
+ // cond: isPowerOfTwo32(c-8) && c >= 136
+ // result: (LEAL8 (SHLLconst <v.Type> [int32(log32(c-8))] x) x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(isPowerOfTwo(c-8) && c >= 136) {
+ if !(isPowerOfTwo32(c-8) && c >= 136) {
break
}
v.reset(Op386LEAL8)
v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
- v0.AuxInt = log2(c - 8)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 8)))
v0.AddArg(x)
v.AddArg2(v0, x)
return true
}
// match: (MULLconst [c] x)
- // cond: c%3 == 0 && isPowerOfTwo(c/3)
- // result: (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SHLLconst [int32(log32(c/3))] (LEAL2 <v.Type> x x))
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
break
}
v.reset(Op386SHLLconst)
- v.AuxInt = log2(c / 3)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
// match: (MULLconst [c] x)
- // cond: c%5 == 0 && isPowerOfTwo(c/5)
- // result: (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SHLLconst [int32(log32(c/5))] (LEAL4 <v.Type> x x))
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
break
}
v.reset(Op386SHLLconst)
- v.AuxInt = log2(c / 5)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
v0.AddArg2(x, x)
v.AddArg(v0)
return true
}
// match: (MULLconst [c] x)
- // cond: c%9 == 0 && isPowerOfTwo(c/9)
- // result: (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SHLLconst [int32(log32(c/9))] (LEAL8 <v.Type> x x))
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
break
}
v.reset(Op386SHLLconst)
- v.AuxInt = log2(c / 9)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
v0.AddArg2(x, x)
v.AddArg(v0)
b := v.Block
config := b.Func.Config
// match: (MULLload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MULLload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MULLload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (MULLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MULLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MULLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
if l.Op != Op386MOVSDload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
continue
}
v.reset(Op386MULSDload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (MULSDload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MULSDload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MULSDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (MULSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MULSDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
if l.Op != Op386MOVSSload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
continue
}
v.reset(Op386MULSSload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (MULSSload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MULSSload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386MULSSload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (MULSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386MULSSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
if v_1.Op != Op386MOVLconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386ORLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != Op386SHLLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != Op386SHRLconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt32(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 32-c) {
continue
}
v.reset(Op386ROLLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
// match: ( ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: c < 16 && d == 16-c && t.Size() == 2
- // result: (ROLWconst x [c])
+ // cond: c < 16 && d == int16(16-c) && t.Size() == 2
+ // result: (ROLWconst x [int16(c)])
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != Op386SHLLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != Op386SHRWconst {
continue
}
- d := v_1.AuxInt
- if x != v_1.Args[0] || !(c < 16 && d == 16-c && t.Size() == 2) {
+ d := auxIntToInt16(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) {
continue
}
v.reset(Op386ROLWconst)
- v.AuxInt = c
+ v.AuxInt = int16ToAuxInt(int16(c))
v.AddArg(x)
return true
}
break
}
// match: ( ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: c < 8 && d == 8-c && t.Size() == 1
- // result: (ROLBconst x [c])
+ // cond: c < 8 && d == int8(8-c) && t.Size() == 1
+ // result: (ROLBconst x [int8(c)])
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != Op386SHLLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != Op386SHRBconst {
continue
}
- d := v_1.AuxInt
- if x != v_1.Args[0] || !(c < 8 && d == 8-c && t.Size() == 1) {
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) {
continue
}
v.reset(Op386ROLBconst)
- v.AuxInt = c
+ v.AuxInt = int8ToAuxInt(int8(c))
v.AddArg(x)
return true
}
if l.Op != Op386MOVLload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(Op386ORLload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (ORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2)
- // result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (ORLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
for {
- valoff1 := v.AuxInt
- sym := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2)) {
+ if !(valoff1.canAdd32(off2)) {
break
}
v.reset(Op386ORLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(base, mem)
return true
}
// match: (ORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLconstmodify [valoff1.addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386ORLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (ORLload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (ORLload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386ORLload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386ORLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (ORLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (ORLmodify [off1+off2] {sym} base val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386ORLmodify)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(base, val, mem)
return true
}
// match: (ORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386ORLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
// match: (ROLBconst [c] (ROLBconst [d] x))
// result: (ROLBconst [(c+d)& 7] x)
for {
- c := v.AuxInt
+ c := auxIntToInt8(v.AuxInt)
if v_0.Op != Op386ROLBconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
v.reset(Op386ROLBconst)
- v.AuxInt = (c + d) & 7
+ v.AuxInt = int8ToAuxInt((c + d) & 7)
v.AddArg(x)
return true
}
// match: (ROLBconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt8(v.AuxInt) != 0 {
break
}
x := v_0
// match: (ROLLconst [c] (ROLLconst [d] x))
// result: (ROLLconst [(c+d)&31] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386ROLLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(Op386ROLLconst)
- v.AuxInt = (c + d) & 31
+ v.AuxInt = int32ToAuxInt((c + d) & 31)
v.AddArg(x)
return true
}
// match: (ROLLconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
x := v_0
// match: (ROLWconst [c] (ROLWconst [d] x))
// result: (ROLWconst [(c+d)&15] x)
for {
- c := v.AuxInt
+ c := auxIntToInt16(v.AuxInt)
if v_0.Op != Op386ROLWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt16(v_0.AuxInt)
x := v_0.Args[0]
v.reset(Op386ROLWconst)
- v.AuxInt = (c + d) & 15
+ v.AuxInt = int16ToAuxInt((c + d) & 15)
v.AddArg(x)
return true
}
// match: (ROLWconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt16(v.AuxInt) != 0 {
break
}
x := v_0
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SARB x (MOVLconst [c]))
- // result: (SARBconst [min(c&31,7)] x)
+ // result: (SARBconst [int8(min(int64(c&31),7))] x)
for {
x := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386SARBconst)
- v.AuxInt = min(c&31, 7)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c&31), 7)))
v.AddArg(x)
return true
}
// match: (SARBconst x [0])
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt8(v.AuxInt) != 0 {
break
}
x := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386SARLconst)
- v.AuxInt = c & 31
+ v.AuxInt = int32ToAuxInt(c & 31)
v.AddArg(x)
return true
}
// result: (SARL x y)
for {
x := v_0
- if v_1.Op != Op386ANDLconst || v_1.AuxInt != 31 {
+ if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 {
break
}
y := v_1.Args[0]
// match: (SARLconst x [0])
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
x := v_0
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SARW x (MOVLconst [c]))
- // result: (SARWconst [min(c&31,15)] x)
+ // result: (SARWconst [int16(min(int64(c&31),15))] x)
for {
x := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386SARWconst)
- v.AuxInt = min(c&31, 15)
+ v.AuxInt = int16ToAuxInt(int16(min(int64(c&31), 15)))
v.AddArg(x)
return true
}
// match: (SARWconst x [0])
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt16(v.AuxInt) != 0 {
break
}
x := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
f := v_2
v.reset(Op386SBBLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg2(x, f)
return true
}
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386SHLLconst)
- v.AuxInt = c & 31
+ v.AuxInt = int32ToAuxInt(c & 31)
v.AddArg(x)
return true
}
// result: (SHLL x y)
for {
x := v_0
- if v_1.Op != Op386ANDLconst || v_1.AuxInt != 31 {
+ if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 {
break
}
y := v_1.Args[0]
// match: (SHLLconst x [0])
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
x := v_0
v_0 := v.Args[0]
// match: (SHRB x (MOVLconst [c]))
// cond: c&31 < 8
- // result: (SHRBconst [c&31] x)
+ // result: (SHRBconst [int8(c&31)] x)
for {
x := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
if !(c&31 < 8) {
break
}
v.reset(Op386SHRBconst)
- v.AuxInt = c & 31
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
v.AddArg(x)
return true
}
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
if !(c&31 >= 8) {
break
}
v.reset(Op386MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
// match: (SHRBconst x [0])
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt8(v.AuxInt) != 0 {
break
}
x := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386SHRLconst)
- v.AuxInt = c & 31
+ v.AuxInt = int32ToAuxInt(c & 31)
v.AddArg(x)
return true
}
// result: (SHRL x y)
for {
x := v_0
- if v_1.Op != Op386ANDLconst || v_1.AuxInt != 31 {
+ if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 {
break
}
y := v_1.Args[0]
// match: (SHRLconst x [0])
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
x := v_0
v_0 := v.Args[0]
// match: (SHRW x (MOVLconst [c]))
// cond: c&31 < 16
- // result: (SHRWconst [c&31] x)
+ // result: (SHRWconst [int16(c&31)] x)
for {
x := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
if !(c&31 < 16) {
break
}
v.reset(Op386SHRWconst)
- v.AuxInt = c & 31
+ v.AuxInt = int16ToAuxInt(int16(c & 31))
v.AddArg(x)
return true
}
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
if !(c&31 >= 16) {
break
}
v.reset(Op386MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
// match: (SHRWconst x [0])
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt16(v.AuxInt) != 0 {
break
}
x := v_0
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386SUBLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != Op386MOVLconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_1
v.reset(Op386NEGL)
v0 := b.NewValue0(v.Pos, Op386SUBLconst, v.Type)
- v0.AuxInt = c
+ v0.AuxInt = int32ToAuxInt(c)
v0.AddArg(x)
v.AddArg(v0)
return true
if l.Op != Op386MOVLload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
break
}
v.reset(Op386SUBLload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
if v_1.Op != Op386MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386SUBLconstcarry)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
b := v.Block
config := b.Func.Config
// match: (SUBLload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (SUBLload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386SUBLload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (SUBLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386SUBLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (SUBLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (SUBLmodify [off1+off2] {sym} base val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386SUBLmodify)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(base, val, mem)
return true
}
// match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386SUBLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
if l.Op != Op386MOVSDload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
break
}
v.reset(Op386SUBSDload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (SUBSDload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (SUBSDload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386SUBSDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (SUBSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386SUBSDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
if l.Op != Op386MOVSSload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
break
}
v.reset(Op386SUBSSload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (SUBSSload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (SUBSSload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386SUBSSload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (SUBSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386SUBSSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
if v_1.Op != Op386MOVLconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(Op386XORLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
if v_0.Op != Op386SHLLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != Op386SHRLconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt32(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 32-c) {
continue
}
v.reset(Op386ROLLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
// match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: c < 16 && d == 16-c && t.Size() == 2
- // result: (ROLWconst x [c])
+ // cond: c < 16 && d == int16(16-c) && t.Size() == 2
+ // result: (ROLWconst x [int16(c)])
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != Op386SHLLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != Op386SHRWconst {
continue
}
- d := v_1.AuxInt
- if x != v_1.Args[0] || !(c < 16 && d == 16-c && t.Size() == 2) {
+ d := auxIntToInt16(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) {
continue
}
v.reset(Op386ROLWconst)
- v.AuxInt = c
+ v.AuxInt = int16ToAuxInt(int16(c))
v.AddArg(x)
return true
}
break
}
// match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: c < 8 && d == 8-c && t.Size() == 1
- // result: (ROLBconst x [c])
+ // cond: c < 8 && d == int8(8-c) && t.Size() == 1
+ // result: (ROLBconst x [int8(c)])
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != Op386SHLLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != Op386SHRBconst {
continue
}
- d := v_1.AuxInt
- if x != v_1.Args[0] || !(c < 8 && d == 8-c && t.Size() == 1) {
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) {
continue
}
v.reset(Op386ROLBconst)
- v.AuxInt = c
+ v.AuxInt = int8ToAuxInt(int8(c))
v.AddArg(x)
return true
}
if l.Op != Op386MOVLload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(Op386XORLload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (XORLconst [c] (XORLconst [d] x))
// result: (XORLconst [c ^ d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != Op386XORLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(Op386XORLconst)
- v.AuxInt = c ^ d
+ v.AuxInt = int32ToAuxInt(c ^ d)
v.AddArg(x)
return true
}
b := v.Block
config := b.Func.Config
// match: (XORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2)
- // result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (XORLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
for {
- valoff1 := v.AuxInt
- sym := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2)) {
+ if !(valoff1.canAdd32(off2)) {
break
}
v.reset(Op386XORLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
v.AddArg2(base, mem)
return true
}
// match: (XORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLconstmodify [valoff1.addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386XORLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (XORLload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (XORLload [off1+off2] {sym} val base mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386XORLload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, base, mem)
return true
}
// match: (XORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != Op386LEAL {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386XORLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
b := v.Block
config := b.Func.Config
// match: (XORLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (XORLmodify [off1+off2] {sym} base val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(Op386XORLmodify)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(base, val, mem)
return true
}
// match: (XORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != Op386LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(Op386XORLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
}
// match: (Zero [s] destptr mem)
// cond: s%4 != 0 && s > 4
- // result: (Zero [s-s%4] (ADDLconst destptr [s%4]) (MOVLstoreconst [0] destptr mem))
+ // result: (Zero [s-s%4] (ADDLconst destptr [int32(s%4)]) (MOVLstoreconst [0] destptr mem))
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
if !(s%4 != 0 && s > 4) {
break
}
v.reset(OpZero)
- v.AuxInt = s - s%4
+ v.AuxInt = int64ToAuxInt(s - s%4)
v0 := b.NewValue0(v.Pos, Op386ADDLconst, typ.UInt32)
- v0.AuxInt = s % 4
+ v0.AuxInt = int32ToAuxInt(int32(s % 4))
v0.AddArg(destptr)
v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
- v1.AuxInt = 0
+ v1.AuxInt = valAndOffToAuxInt(0)
v1.AddArg2(destptr, mem)
v.AddArg2(v0, v1)
return true