(MULQconst [c] (NEGQ x)) && c != -(1<<31) -> (MULQconst [-c] x)
// checking AND against 0.
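// A Go condition like `x&y == 0` would otherwise compile to an AND
// followed by a compare of the result against zero. When the AND has
// no other uses, TEST sets the same flags without materializing the
// result.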
-(CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 -> (TESTQ x y)
-(CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 -> (TESTL x y)
-(CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 -> (TESTW x y)
-(CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 -> (TESTB x y)
-(CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 -> (TESTQconst [c] x)
-(CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 -> (TESTLconst [c] x)
-(CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 -> (TESTWconst [int64(int16(c))] x)
-(CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 -> (TESTBconst [int64(int8(c))] x)
+(CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y)
+(CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTL x y)
+(CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTW x y)
+(CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTB x y)
+(CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 => (TESTQconst [c] x)
+(CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTLconst [c] x)
+(CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTWconst [int16(c)] x)
+(CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTBconst [int8(c)] x)
// Convert TESTx to TESTxconst if possible.
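// Note that TESTWconst and TESTBconst carry int16/int8 aux values, so
// the constant is truncated to the operand width on conversion.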
-(TESTQ (MOVQconst [c]) x) && is32Bit(c) -> (TESTQconst [c] x)
-(TESTL (MOVLconst [c]) x) -> (TESTLconst [c] x)
-(TESTW (MOVLconst [c]) x) -> (TESTWconst [c] x)
-(TESTB (MOVLconst [c]) x) -> (TESTBconst [c] x)
+(TESTQ (MOVQconst [c]) x) && is32Bit(c) => (TESTQconst [int32(c)] x)
+(TESTL (MOVLconst [c]) x) => (TESTLconst [c] x)
+(TESTW (MOVLconst [c]) x) => (TESTWconst [int16(c)] x)
+(TESTB (MOVLconst [c]) x) => (TESTBconst [int8(c)] x)
// TEST %reg,%reg is shorter than CMP
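// (e.g. TESTQ AX, AX encodes in 3 bytes; CMPQ $0, AX takes 4.)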
-(CMPQconst x [0]) -> (TESTQ x x)
-(CMPLconst x [0]) -> (TESTL x x)
-(CMPWconst x [0]) -> (TESTW x x)
-(CMPBconst x [0]) -> (TESTB x x)
-(TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst -> (TESTQ x x)
-(TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst -> (TESTL x x)
-(TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst -> (TESTW x x)
-(TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst -> (TESTB x x)
+(CMPQconst x [0]) => (TESTQ x x)
+(CMPLconst x [0]) => (TESTL x x)
+(CMPWconst x [0]) => (TESTW x x)
+(CMPBconst x [0]) => (TESTB x x)
+(TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst => (TESTQ x x)
+(TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTL x x)
+(TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTW x x)
+(TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTB x x)
// Convert LEAQ1 back to ADDQ if we can
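// With a zero offset and no symbol, LEAQ1 x y is just x+y, and ADDQ
// has a shorter encoding.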
-(LEAQ1 [0] x y) && v.Aux == nil -> (ADDQ x y)
+(LEAQ1 [0] x y) && v.Aux == nil => (ADDQ x y)
// Combining byte loads into larger (unaligned) loads.
// There are many ways these combinations could occur. This is
// designed to match the way encoding/binary reads little- and
// big-endian data.
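// For example, a little-endian 16-bit read such as
//	uint16(b[0]) | uint16(b[1])<<8
// matches the first rule below and becomes a single MOVWload.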
&& sh.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, sh)
- -> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
+ => @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
(OR(L|Q) x0:(MOVBload [i] {s} p0 mem)
sh:(SHL(L|Q)const [8] x1:(MOVBload [i] {s} p1 mem)))
&& sequentialAddresses(p0, p1, 1)
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, sh)
- -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+ => @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
(OR(L|Q) x0:(MOVWload [i0] {s} p mem)
sh:(SHL(L|Q)const [16] x1:(MOVWload [i1] {s} p mem)))
&& sh.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, sh)
- -> @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
+ => @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
(OR(L|Q) x0:(MOVWload [i] {s} p0 mem)
sh:(SHL(L|Q)const [16] x1:(MOVWload [i] {s} p1 mem)))
&& sequentialAddresses(p0, p1, 2)
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, sh)
- -> @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
+ => @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
(ORQ x0:(MOVLload [i0] {s} p mem)
sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
&& sh.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, sh)
- -> @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
+ => @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
(ORQ x0:(MOVLload [i] {s} p0 mem)
sh:(SHLQconst [32] x1:(MOVLload [i] {s} p1 mem)))
&& sequentialAddresses(p0, p1, 4)
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, sh)
- -> @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem)
+ => @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem)
(OR(L|Q)
s1:(SHL(L|Q)const [j1] x1:(MOVBload [i1] {s} p mem))
&& or.Uses == 1
&& mergePoint(b,x0,x1,y) != nil
&& clobber(x0, x1, s0, s1, or)
- -> @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
+ => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
(OR(L|Q)
s1:(SHL(L|Q)const [j1] x1:(MOVBload [i] {s} p1 mem))
&& sequentialAddresses(p0, p1, 1)
&& mergePoint(b,x0,x1,y) != nil
&& clobber(x0, x1, s0, s1, or)
- -> @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
+ => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
(ORQ
s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))
&& or.Uses == 1
&& mergePoint(b,x0,x1,y) != nil
&& clobber(x0, x1, s0, s1, or)
- -> @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
+ => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
(ORQ
s1:(SHLQconst [j1] x1:(MOVWload [i] {s} p1 mem))
&& sequentialAddresses(p0, p1, 2)
&& mergePoint(b,x0,x1,y) != nil
&& clobber(x0, x1, s0, s1, or)
- -> @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i] {s} p0 mem)) y)
+ => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i] {s} p0 mem)) y)
// Big-endian loads
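// A big-endian read such as
//	uint16(b[1]) | uint16(b[0])<<8
// also combines into a single load, followed by a byte swap:
// ROLWconst [8] for 16-bit loads, BSWAPL/BSWAPQ for 32- and 64-bit
// loads.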
&& sh.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, sh)
- -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
+ => @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
(OR(L|Q)
x1:(MOVBload [i] {s} p1 mem)
&& sequentialAddresses(p0, p1, 1)
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, sh)
- -> @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
+ => @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
(OR(L|Q)
r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))
&& sh.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, r0, r1, sh)
- -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
+ => @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
(OR(L|Q)
r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem))
&& sequentialAddresses(p0, p1, 2)
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, r0, r1, sh)
- -> @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
+ => @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
(ORQ
r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))
&& sh.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, r0, r1, sh)
- -> @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
+ => @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
(ORQ
r1:(BSWAPL x1:(MOVLload [i] {s} p1 mem))
&& sequentialAddresses(p0, p1, 4)
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, r0, r1, sh)
- -> @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i] {s} p0 mem))
+ => @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i] {s} p0 mem))
(OR(L|Q)
s0:(SHL(L|Q)const [j0] x0:(MOVBload [i0] {s} p mem))
&& or.Uses == 1
&& mergePoint(b,x0,x1,y) != nil
&& clobber(x0, x1, s0, s1, or)
- -> @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
(OR(L|Q)
s0:(SHL(L|Q)const [j0] x0:(MOVBload [i] {s} p0 mem))
&& sequentialAddresses(p0, p1, 1)
&& mergePoint(b,x0,x1,y) != nil
&& clobber(x0, x1, s0, s1, or)
- -> @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
+ => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
(ORQ
s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))
&& or.Uses == 1
&& mergePoint(b,x0,x1,y) != nil
&& clobber(x0, x1, r0, r1, s0, s1, or)
- -> @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
+ => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
(ORQ
s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem)))
&& sequentialAddresses(p0, p1, 2)
&& mergePoint(b,x0,x1,y) != nil
&& clobber(x0, x1, r0, r1, s0, s1, or)
- -> @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i] {s} p0 mem))) y)
+ => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i] {s} p0 mem))) y)
// Combine 2 byte stores + shift into rolw 8 + word store
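// This is the 16-bit big-endian store pattern, e.g.
//	b[0] = byte(v >> 8); b[1] = byte(v)
// Rotating v by 8 swaps its bytes, so a single little-endian
// MOVWstore then writes both.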
(MOVBstore [i] {s} p w
x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
&& x0.Uses == 1
&& clobber(x0)
- -> (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
+ => (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
(MOVBstore [i] {s} p1 w
x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
&& x0.Uses == 1
&& sequentialAddresses(p0, p1, 1)
&& clobber(x0)
- -> (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
+ => (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
// Combine stores + shifts into bswap and larger (unaligned) stores
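// The 32- and 64-bit big-endian analogues: four (or eight) byte
// stores of w, w>>8, ..., w>>24 (or ..., w>>56) collapse into BSWAPL
// (or BSWAPQ) plus a single MOVLstore (or MOVQstore).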
(MOVBstore [i] {s} p w
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x0, x1, x2)
- -> (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
+ => (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
(MOVBstore [i] {s} p3 w
x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w)
x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w)
&& sequentialAddresses(p1, p2, 1)
&& sequentialAddresses(p2, p3, 1)
&& clobber(x0, x1, x2)
- -> (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
+ => (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
(MOVBstore [i] {s} p w
x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w)
&& x5.Uses == 1
&& x6.Uses == 1
&& clobber(x0, x1, x2, x3, x4, x5, x6)
- -> (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
+ => (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
(MOVBstore [i] {s} p7 w
x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w)
x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w)
&& sequentialAddresses(p5, p6, 1)
&& sequentialAddresses(p6, p7, 1)
&& clobber(x0, x1, x2, x3, x4, x5, x6)
- -> (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
+ => (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
// Combine constant stores into larger (unaligned) stores.
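// For example, storing constant 0x12 at off and 0x34 at off+1 merges
// into one MOVWstoreconst of 0x3412 at off: the store at the lower
// offset supplies the low byte.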
(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
&& x.Uses == 1
- && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
+ && a.Off() + 1 == c.Off()
&& clobber(x)
- -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+ => (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
&& x.Uses == 1
- && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
+ && a.Off() + 1 == c.Off()
&& clobber(x)
- -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+ => (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
&& x.Uses == 1
- && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
+ && a.Off() + 2 == c.Off()
&& clobber(x)
- -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+ => (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
&& x.Uses == 1
- && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
+ && a.Off() + 2 == c.Off()
&& clobber(x)
- -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+ => (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
(MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
&& x.Uses == 1
- && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
+ && a.Off() + 4 == c.Off()
&& clobber(x)
- -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
+ => (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem)
(MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
&& x.Uses == 1
- && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
+ && a.Off() + 4 == c.Off()
&& clobber(x)
- -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
+ => (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem)
(MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
&& config.useSSE
&& x.Uses == 1
- && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off()
- && ValAndOff(c).Val() == 0
- && ValAndOff(c2).Val() == 0
+ && c2.Off() + 8 == c.Off()
+ && c.Val() == 0
+ && c2.Val() == 0
&& clobber(x)
- -> (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem)
+ => (MOVOstore [c2.Off32()] {s} p (MOVOconst [0]) mem)
// Combine stores into larger (unaligned) stores. Little endian.
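// e.g. binary.LittleEndian.PutUint16(b, v), which stores
//	b[0] = byte(v); b[1] = byte(v >> 8)
// becomes a single MOVWstore of v.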
(MOVBstore [i] {s} p (SHR(W|L|Q)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVWstore [i-1] {s} p w mem)
+ => (MOVWstore [i-1] {s} p w mem)
(MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHR(W|L|Q)const [8] w) mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVWstore [i] {s} p w mem)
+ => (MOVWstore [i] {s} p w mem)
(MOVBstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVBstore [i-1] {s} p w0:(SHR(L|Q)const [j-8] w) mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVWstore [i-1] {s} p w0 mem)
+ => (MOVWstore [i-1] {s} p w0 mem)
(MOVBstore [i] {s} p1 (SHR(W|L|Q)const [8] w) x:(MOVBstore [i] {s} p0 w mem))
&& x.Uses == 1
&& sequentialAddresses(p0, p1, 1)
&& clobber(x)
- -> (MOVWstore [i] {s} p0 w mem)
+ => (MOVWstore [i] {s} p0 w mem)
(MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHR(W|L|Q)const [8] w) mem))
&& x.Uses == 1
&& sequentialAddresses(p0, p1, 1)
&& clobber(x)
- -> (MOVWstore [i] {s} p0 w mem)
+ => (MOVWstore [i] {s} p0 w mem)
(MOVBstore [i] {s} p1 (SHR(L|Q)const [j] w) x:(MOVBstore [i] {s} p0 w0:(SHR(L|Q)const [j-8] w) mem))
&& x.Uses == 1
&& sequentialAddresses(p0, p1, 1)
&& clobber(x)
- -> (MOVWstore [i] {s} p0 w0 mem)
+ => (MOVWstore [i] {s} p0 w0 mem)
(MOVWstore [i] {s} p (SHR(L|Q)const [16] w) x:(MOVWstore [i-2] {s} p w mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVLstore [i-2] {s} p w mem)
+ => (MOVLstore [i-2] {s} p w mem)
(MOVWstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVWstore [i-2] {s} p w0:(SHR(L|Q)const [j-16] w) mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVLstore [i-2] {s} p w0 mem)
+ => (MOVLstore [i-2] {s} p w0 mem)
(MOVWstore [i] {s} p1 (SHR(L|Q)const [16] w) x:(MOVWstore [i] {s} p0 w mem))
&& x.Uses == 1
&& sequentialAddresses(p0, p1, 2)
&& clobber(x)
- -> (MOVLstore [i] {s} p0 w mem)
+ => (MOVLstore [i] {s} p0 w mem)
(MOVWstore [i] {s} p1 (SHR(L|Q)const [j] w) x:(MOVWstore [i] {s} p0 w0:(SHR(L|Q)const [j-16] w) mem))
&& x.Uses == 1
&& sequentialAddresses(p0, p1, 2)
&& clobber(x)
- -> (MOVLstore [i] {s} p0 w0 mem)
+ => (MOVLstore [i] {s} p0 w0 mem)
(MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVQstore [i-4] {s} p w mem)
+ => (MOVQstore [i-4] {s} p w mem)
(MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVQstore [i-4] {s} p w0 mem)
+ => (MOVQstore [i-4] {s} p w0 mem)
(MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem))
&& x.Uses == 1
&& sequentialAddresses(p0, p1, 4)
&& clobber(x)
- -> (MOVQstore [i] {s} p0 w mem)
+ => (MOVQstore [i] {s} p0 w mem)
(MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem))
&& x.Uses == 1
&& sequentialAddresses(p0, p1, 4)
&& clobber(x)
- -> (MOVQstore [i] {s} p0 w0 mem)
+ => (MOVQstore [i] {s} p0 w0 mem)
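// Merge adjacent byte-by-byte copies (a load feeding a store) into a
// single wider load/store pair.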
(MOVBstore [i] {s} p
x1:(MOVBload [j] {s2} p2 mem)
&& x2.Uses == 1
&& mem2.Uses == 1
&& clobber(x1, x2, mem2)
- -> (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
+ => (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
(MOVWstore [i] {s} p
x1:(MOVWload [j] {s2} p2 mem)
&& x2.Uses == 1
&& mem2.Uses == 1
&& clobber(x1, x2, mem2)
- -> (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
+ => (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
(MOVLstore [i] {s} p
x1:(MOVLload [j] {s2} p2 mem)
&& x2.Uses == 1
&& mem2.Uses == 1
&& clobber(x1, x2, mem2)
- -> (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
+ => (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
// cond: a.Uses == 1
// result: (TESTB x y)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt8(v.AuxInt) != 0 {
break
}
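		// auxIntToInt8 and friends are the typed accessors used by the =>
		// rules: AuxInt is still stored as an int64 on the Value, but each
		// op now declares the Go type it holds (int8 for CMPBconst), and
		// the generated code converts only at this boundary.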
a := v_0
}
// match: (CMPBconst a:(ANDLconst [c] x) [0])
// cond: a.Uses == 1
- // result: (TESTBconst [int64(int8(c))] x)
+ // result: (TESTBconst [int8(c)] x)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt8(v.AuxInt) != 0 {
break
}
a := v_0
if a.Op != OpAMD64ANDLconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt32(a.AuxInt)
x := a.Args[0]
if !(a.Uses == 1) {
break
}
v.reset(OpAMD64TESTBconst)
- v.AuxInt = int64(int8(c))
+ v.AuxInt = int8ToAuxInt(int8(c))
v.AddArg(x)
return true
}
// match: (CMPBconst x [0])
// result: (TESTB x x)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt8(v.AuxInt) != 0 {
break
}
x := v_0
// cond: a.Uses == 1
// result: (TESTL x y)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
a := v_0
// cond: a.Uses == 1
// result: (TESTLconst [c] x)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
a := v_0
if a.Op != OpAMD64ANDLconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt32(a.AuxInt)
x := a.Args[0]
if !(a.Uses == 1) {
break
}
v.reset(OpAMD64TESTLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (CMPLconst x [0])
// result: (TESTL x x)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
x := v_0
// cond: a.Uses == 1
// result: (TESTQ x y)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
a := v_0
// cond: a.Uses == 1
// result: (TESTQconst [c] x)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
a := v_0
if a.Op != OpAMD64ANDQconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt32(a.AuxInt)
x := a.Args[0]
if !(a.Uses == 1) {
break
}
v.reset(OpAMD64TESTQconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (CMPQconst x [0])
// result: (TESTQ x x)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
x := v_0
// cond: a.Uses == 1
// result: (TESTW x y)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt16(v.AuxInt) != 0 {
break
}
a := v_0
}
// match: (CMPWconst a:(ANDLconst [c] x) [0])
// cond: a.Uses == 1
- // result: (TESTWconst [int64(int16(c))] x)
+ // result: (TESTWconst [int16(c)] x)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt16(v.AuxInt) != 0 {
break
}
a := v_0
if a.Op != OpAMD64ANDLconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt32(a.AuxInt)
x := a.Args[0]
if !(a.Uses == 1) {
break
}
v.reset(OpAMD64TESTWconst)
- v.AuxInt = int64(int16(c))
+ v.AuxInt = int16ToAuxInt(int16(c))
v.AddArg(x)
return true
}
// match: (CMPWconst x [0])
// result: (TESTW x x)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt16(v.AuxInt) != 0 {
break
}
x := v_0
// cond: v.Aux == nil
// result: (ADDQ x y)
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
x := v_0
// cond: x0.Uses == 1 && clobber(x0)
// result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
- if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpAMD64SHRWconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
+ if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
- v0.AuxInt = 8
+ v0.AuxInt = int8ToAuxInt(8)
v0.AddArg(w)
v.AddArg3(p, v0, mem)
return true
// cond: x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)
// result: (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p1 := v_0
w := v_1
x0 := v_2
- if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i || x0.Aux != s {
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
p0 := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpAMD64SHRWconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
+ if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
- v0.AuxInt = 8
+ v0.AuxInt = int8ToAuxInt(8)
v0.AddArg(w)
v.AddArg3(p0, v0, mem)
return true
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x2 := v_2
- if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i-1 || x2.Aux != s {
+ if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-1 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpAMD64SHRLconst || x2_1.AuxInt != 8 || w != x2_1.Args[0] {
+ if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
break
}
x1 := x2.Args[2]
- if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+ if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpAMD64SHRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+ if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
break
}
x0 := x1.Args[2]
- if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i-3 || x0.Aux != s {
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-3 || auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpAMD64SHRLconst || x0_1.AuxInt != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = i - 3
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
v0.AddArg(w)
v.AddArg3(p, v0, mem)
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)
// result: (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p3 := v_0
w := v_1
x2 := v_2
- if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i || x2.Aux != s {
+ if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
p2 := x2.Args[0]
x2_1 := x2.Args[1]
- if x2_1.Op != OpAMD64SHRLconst || x2_1.AuxInt != 8 || w != x2_1.Args[0] {
+ if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
break
}
x1 := x2.Args[2]
- if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i || x1.Aux != s {
+ if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
p1 := x1.Args[0]
x1_1 := x1.Args[1]
- if x1_1.Op != OpAMD64SHRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+ if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
break
}
x0 := x1.Args[2]
- if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i || x0.Aux != s {
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
p0 := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpAMD64SHRLconst || x0_1.AuxInt != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
+ if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
v0.AddArg(w)
v.AddArg3(p0, v0, mem)
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x6 := v_2
- if x6.Op != OpAMD64MOVBstore || x6.AuxInt != i-1 || x6.Aux != s {
+ if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i-1 || auxToSym(x6.Aux) != s {
break
}
_ = x6.Args[2]
break
}
x6_1 := x6.Args[1]
- if x6_1.Op != OpAMD64SHRQconst || x6_1.AuxInt != 8 || w != x6_1.Args[0] {
+ if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
break
}
x5 := x6.Args[2]
- if x5.Op != OpAMD64MOVBstore || x5.AuxInt != i-2 || x5.Aux != s {
+ if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i-2 || auxToSym(x5.Aux) != s {
break
}
_ = x5.Args[2]
break
}
x5_1 := x5.Args[1]
- if x5_1.Op != OpAMD64SHRQconst || x5_1.AuxInt != 16 || w != x5_1.Args[0] {
+ if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
break
}
x4 := x5.Args[2]
- if x4.Op != OpAMD64MOVBstore || x4.AuxInt != i-3 || x4.Aux != s {
+ if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i-3 || auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[2]
break
}
x4_1 := x4.Args[1]
- if x4_1.Op != OpAMD64SHRQconst || x4_1.AuxInt != 24 || w != x4_1.Args[0] {
+ if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
break
}
x3 := x4.Args[2]
- if x3.Op != OpAMD64MOVBstore || x3.AuxInt != i-4 || x3.Aux != s {
+ if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[2]
break
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpAMD64SHRQconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] {
+ if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
break
}
x2 := x3.Args[2]
- if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i-5 || x2.Aux != s {
+ if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-5 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpAMD64SHRQconst || x2_1.AuxInt != 40 || w != x2_1.Args[0] {
+ if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
break
}
x1 := x2.Args[2]
- if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i-6 || x1.Aux != s {
+ if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-6 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpAMD64SHRQconst || x1_1.AuxInt != 48 || w != x1_1.Args[0] {
+ if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
break
}
x0 := x1.Args[2]
- if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i-7 || x0.Aux != s {
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-7 || auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpAMD64SHRQconst || x0_1.AuxInt != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = i - 7
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 7)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
v0.AddArg(w)
v.AddArg3(p, v0, mem)
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)
// result: (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p7 := v_0
w := v_1
x6 := v_2
- if x6.Op != OpAMD64MOVBstore || x6.AuxInt != i || x6.Aux != s {
+ if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i || auxToSym(x6.Aux) != s {
break
}
_ = x6.Args[2]
p6 := x6.Args[0]
x6_1 := x6.Args[1]
- if x6_1.Op != OpAMD64SHRQconst || x6_1.AuxInt != 8 || w != x6_1.Args[0] {
+ if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
break
}
x5 := x6.Args[2]
- if x5.Op != OpAMD64MOVBstore || x5.AuxInt != i || x5.Aux != s {
+ if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i || auxToSym(x5.Aux) != s {
break
}
_ = x5.Args[2]
p5 := x5.Args[0]
x5_1 := x5.Args[1]
- if x5_1.Op != OpAMD64SHRQconst || x5_1.AuxInt != 16 || w != x5_1.Args[0] {
+ if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
break
}
x4 := x5.Args[2]
- if x4.Op != OpAMD64MOVBstore || x4.AuxInt != i || x4.Aux != s {
+ if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i || auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[2]
p4 := x4.Args[0]
x4_1 := x4.Args[1]
- if x4_1.Op != OpAMD64SHRQconst || x4_1.AuxInt != 24 || w != x4_1.Args[0] {
+ if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
break
}
x3 := x4.Args[2]
- if x3.Op != OpAMD64MOVBstore || x3.AuxInt != i || x3.Aux != s {
+ if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i || auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[2]
p3 := x3.Args[0]
x3_1 := x3.Args[1]
- if x3_1.Op != OpAMD64SHRQconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] {
+ if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
break
}
x2 := x3.Args[2]
- if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i || x2.Aux != s {
+ if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
p2 := x2.Args[0]
x2_1 := x2.Args[1]
- if x2_1.Op != OpAMD64SHRQconst || x2_1.AuxInt != 40 || w != x2_1.Args[0] {
+ if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
break
}
x1 := x2.Args[2]
- if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i || x1.Aux != s {
+ if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
p1 := x1.Args[0]
x1_1 := x1.Args[1]
- if x1_1.Op != OpAMD64SHRQconst || x1_1.AuxInt != 48 || w != x1_1.Args[0] {
+ if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
break
}
x0 := x1.Args[2]
- if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i || x0.Aux != s {
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
p0 := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpAMD64SHRQconst || x0_1.AuxInt != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
v0.AddArg(w)
v.AddArg3(p0, v0, mem)
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpAMD64SHRWconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i] {s} p w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
x_1 := x.Args[1]
- if x_1.Op != OpAMD64SHRWconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i] {s} p w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
x_1 := x.Args[1]
- if x_1.Op != OpAMD64SHRLconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i] {s} p w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
x_1 := x.Args[1]
- if x_1.Op != OpAMD64SHRQconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
if v_1.Op != OpAMD64SHRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
w0 := x.Args[1]
- if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(p, w0, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
if v_1.Op != OpAMD64SHRQconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
w0 := x.Args[1]
- if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(p, w0, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p1 := v_0
- if v_1.Op != OpAMD64SHRWconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p1 := v_0
- if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p1 := v_0
- if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p0 := v_0
w := v_1
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p1 := x.Args[0]
x_1 := x.Args[1]
- if x_1.Op != OpAMD64SHRWconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p0 := v_0
w := v_1
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p1 := x.Args[0]
x_1 := x.Args[1]
- if x_1.Op != OpAMD64SHRLconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p0 := v_0
w := v_1
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p1 := x.Args[0]
x_1 := x.Args[1]
- if x_1.Op != OpAMD64SHRQconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p1 := v_0
if v_1.Op != OpAMD64SHRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p0 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w0, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p1 := v_0
if v_1.Op != OpAMD64SHRQconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p0 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w0, mem)
return true
}
// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
// result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
x1 := v_1
if x1.Op != OpAMD64MOVBload {
break
}
- j := x1.AuxInt
- s2 := x1.Aux
+ j := auxIntToInt32(x1.AuxInt)
+ s2 := auxToSym(x1.Aux)
mem := x1.Args[1]
p2 := x1.Args[0]
mem2 := v_2
- if mem2.Op != OpAMD64MOVBstore || mem2.AuxInt != i-1 || mem2.Aux != s {
+ if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
break
}
_ = mem2.Args[2]
break
}
x2 := mem2.Args[1]
- if x2.Op != OpAMD64MOVBload || x2.AuxInt != j-1 || x2.Aux != s2 {
+ if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
break
}
_ = x2.Args[1]
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
- v0.AuxInt = j - 1
- v0.Aux = s2
+ v0.AuxInt = int32ToAuxInt(j - 1)
+ v0.Aux = symToAux(s2)
v0.AddArg2(p2, mem)
v.AddArg3(p, v0, mem)
return true
return true
}
// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
- // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
- // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+ // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
x := v_1
if x.Op != OpAMD64MOVBstoreconst {
break
}
- a := x.AuxInt
- if x.Aux != s {
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
- if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstoreconst)
- v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
- v.Aux = s
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xff|c.Val()<<8, a.Off()))
+ v.Aux = symToAux(s)
v.AddArg2(p, mem)
return true
}
// match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
- // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
- // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+ // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
for {
- a := v.AuxInt
- s := v.Aux
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
x := v_1
if x.Op != OpAMD64MOVBstoreconst {
break
}
- c := x.AuxInt
- if x.Aux != s {
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
- if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstoreconst)
- v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
- v.Aux = s
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xff|c.Val()<<8, a.Off()))
+ v.Aux = symToAux(s)
v.AddArg2(p, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVQstore [i-4] {s} p w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 32 {
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVLstore || x.AuxInt != i-4 || x.Aux != s {
+ if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = i - 4
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVQstore [i-4] {s} p w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
if v_1.Op != OpAMD64SHRQconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVLstore || x.AuxInt != i-4 || x.Aux != s {
+ if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
w0 := x.Args[1]
- if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = i - 4
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
v.AddArg3(p, w0, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
// result: (MOVQstore [i] {s} p0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p1 := v_0
- if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 32 {
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVLstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
// result: (MOVQstore [i] {s} p0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p1 := v_0
if v_1.Op != OpAMD64SHRQconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVLstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p0 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w0, mem)
return true
}
// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
// result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
x1 := v_1
if x1.Op != OpAMD64MOVLload {
break
}
- j := x1.AuxInt
- s2 := x1.Aux
+ j := auxIntToInt32(x1.AuxInt)
+ s2 := auxToSym(x1.Aux)
mem := x1.Args[1]
p2 := x1.Args[0]
mem2 := v_2
- if mem2.Op != OpAMD64MOVLstore || mem2.AuxInt != i-4 || mem2.Aux != s {
+ if mem2.Op != OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s {
break
}
_ = mem2.Args[2]
break
}
x2 := mem2.Args[1]
- if x2.Op != OpAMD64MOVLload || x2.AuxInt != j-4 || x2.Aux != s2 {
+ if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 {
break
}
_ = x2.Args[1]
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = i - 4
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
- v0.AuxInt = j - 4
- v0.Aux = s2
+ v0.AuxInt = int32ToAuxInt(j - 4)
+ v0.Aux = symToAux(s2)
v0.AddArg2(p2, mem)
v.AddArg3(p, v0, mem)
return true
return true
}
// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
- // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
- // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
+ // cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x)
+ // result: (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
x := v_1
if x.Op != OpAMD64MOVLstoreconst {
break
}
- a := x.AuxInt
- if x.Aux != s {
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
- if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = ValAndOff(a).Off()
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(a.Off32())
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
- v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
+ v0.AuxInt = int64ToAuxInt(a.Val()&0xffffffff | c.Val()<<32)
v.AddArg3(p, v0, mem)
return true
}
// match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
- // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
- // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
+ // cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x)
+ // result: (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem)
for {
- a := v.AuxInt
- s := v.Aux
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
x := v_1
if x.Op != OpAMD64MOVLstoreconst {
break
}
- c := x.AuxInt
- if x.Aux != s {
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
- if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = ValAndOff(a).Off()
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(a.Off32())
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
- v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
+ v0.AuxInt = int64ToAuxInt(a.Val()&0xffffffff | c.Val()<<32)
v.AddArg3(p, v0, mem)
return true
}
// match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
- // cond: config.useSSE && x.Uses == 1 && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)
- // result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem)
+ // cond: config.useSSE && x.Uses == 1 && c2.Off() + 8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x)
+ // result: (MOVOstore [c2.Off32()] {s} p (MOVOconst [0]) mem)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
x := v_1
if x.Op != OpAMD64MOVQstoreconst {
break
}
- c2 := x.AuxInt
- if x.Aux != s {
+ c2 := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
- if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) {
+ if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && c2.Off()+8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x)) {
break
}
v.reset(OpAMD64MOVOstore)
- v.AuxInt = ValAndOff(c2).Off()
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(c2.Off32())
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v0.AuxInt = 0
+ v0.AuxInt = int128ToAuxInt(0)
v.AddArg3(p, v0, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVLstore [i-2] {s} p w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s {
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVLstore [i-2] {s} p w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
- if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s {
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVLstore [i-2] {s} p w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
if v_1.Op != OpAMD64SHRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s {
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if p != x.Args[0] {
break
}
w0 := x.Args[1]
- if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v.AddArg3(p, w0, mem)
return true
}
// cond: x.Uses == 1 && clobber(x)
// result: (MOVLstore [i-2] {s} p w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
if v_1.Op != OpAMD64SHRQconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s {
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if p != x.Args[0] {
break
}
w0 := x.Args[1]
- if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v.AddArg3(p, w0, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
// result: (MOVLstore [i] {s} p0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p1 := v_0
- if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVWstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p0 := x.Args[0]
if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
// result: (MOVLstore [i] {s} p0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p1 := v_0
- if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVWstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p0 := x.Args[0]
if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
// result: (MOVLstore [i] {s} p0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p1 := v_0
if v_1.Op != OpAMD64SHRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVWstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p0 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w0, mem)
return true
}
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
// result: (MOVLstore [i] {s} p0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p1 := v_0
if v_1.Op != OpAMD64SHRQconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpAMD64MOVWstore || x.AuxInt != i || x.Aux != s {
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p0 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = i
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
v.AddArg3(p0, w0, mem)
return true
}
// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
// result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
x1 := v_1
if x1.Op != OpAMD64MOVWload {
break
}
- j := x1.AuxInt
- s2 := x1.Aux
+ j := auxIntToInt32(x1.AuxInt)
+ s2 := auxToSym(x1.Aux)
mem := x1.Args[1]
p2 := x1.Args[0]
mem2 := v_2
- if mem2.Op != OpAMD64MOVWstore || mem2.AuxInt != i-2 || mem2.Aux != s {
+ if mem2.Op != OpAMD64MOVWstore || auxIntToInt32(mem2.AuxInt) != i-2 || auxToSym(mem2.Aux) != s {
break
}
_ = mem2.Args[2]
if p != mem2.Args[0] {
break
}
x2 := mem2.Args[1]
- if x2.Op != OpAMD64MOVWload || x2.AuxInt != j-2 || x2.Aux != s2 {
+ if x2.Op != OpAMD64MOVWload || auxIntToInt32(x2.AuxInt) != j-2 || auxToSym(x2.Aux) != s2 {
break
}
_ = x2.Args[1]
if p2 != x2.Args[0] || mem != x2.Args[1] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32)
- v0.AuxInt = j - 2
- v0.Aux = s2
+ v0.AuxInt = int32ToAuxInt(j - 2)
+ v0.Aux = symToAux(s2)
v0.AddArg2(p2, mem)
v.AddArg3(p, v0, mem)
return true
}
// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
- // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
- // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+ // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
for {
- c := v.AuxInt
- s := v.Aux
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
x := v_1
if x.Op != OpAMD64MOVWstoreconst {
break
}
- a := x.AuxInt
- if x.Aux != s {
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
- if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
break
}
v.reset(OpAMD64MOVLstoreconst)
- v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
- v.Aux = s
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xffff|c.Val()<<16, a.Off()))
+ v.Aux = symToAux(s)
v.AddArg2(p, mem)
return true
}
// match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
- // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
- // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+ // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
for {
- a := v.AuxInt
- s := v.Aux
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
p := v_0
x := v_1
if x.Op != OpAMD64MOVWstoreconst {
break
}
- c := x.AuxInt
- if x.Aux != s {
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
- if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
break
}
v.reset(OpAMD64MOVLstoreconst)
- v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
- v.Aux = s
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xffff|c.Val()<<16, a.Off()))
+ v.Aux = symToAux(s)
v.AddArg2(p, mem)
return true
}
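// Illustrative example (not part of the generated code): with a.Val() =
// 0x1122 at Off 0 and c.Val() = 0x3344 at Off 2, the merged constant is
//	a.Val()&0xffff | c.Val()<<16 = 0x33441122
// so a single 4-byte constant store reproduces both 2-byte stores on
// little-endian AMD64.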
if x0.Op != OpAMD64MOVBload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
continue
}
x1 := sh.Args[0]
if x1.Op != OpAMD64MOVBload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
v.copyOf(v0)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg2(p, mem)
return true
}
if x0.Op != OpAMD64MOVBload {
continue
}
- i := x0.AuxInt
- s := x0.Aux
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p0 := x0.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
continue
}
x1 := sh.Args[0]
- if x1.Op != OpAMD64MOVBload || x1.AuxInt != i || x1.Aux != s {
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
v.copyOf(v0)
- v0.AuxInt = i
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
v0.AddArg2(p0, mem)
return true
}
if x0.Op != OpAMD64MOVWload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
continue
}
x1 := sh.Args[0]
if x1.Op != OpAMD64MOVWload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
v.copyOf(v0)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg2(p, mem)
return true
}
if x0.Op != OpAMD64MOVWload {
continue
}
- i := x0.AuxInt
- s := x0.Aux
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p0 := x0.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
continue
}
x1 := sh.Args[0]
- if x1.Op != OpAMD64MOVWload || x1.AuxInt != i || x1.Aux != s {
+ if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
v.copyOf(v0)
- v0.AuxInt = i
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
v0.AddArg2(p0, mem)
return true
}
if s1.Op != OpAMD64SHLLconst {
continue
}
- j1 := s1.AuxInt
+ j1 := auxIntToInt8(s1.AuxInt)
x1 := s1.Args[0]
if x1.Op != OpAMD64MOVBload {
continue
}
- i1 := x1.AuxInt
- s := x1.Aux
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p := x1.Args[0]
or := v_1
if s0.Op != OpAMD64SHLLconst {
continue
}
- j0 := s0.AuxInt
+ j0 := auxIntToInt8(s0.AuxInt)
x0 := s0.Args[0]
if x0.Op != OpAMD64MOVBload {
continue
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
- v1.AuxInt = j0
+ v1.AuxInt = int8ToAuxInt(j0)
v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
- v2.AuxInt = i0
- v2.Aux = s
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
v2.AddArg2(p, mem)
v1.AddArg(v2)
v0.AddArg2(v1, y)
if s1.Op != OpAMD64SHLLconst {
continue
}
- j1 := s1.AuxInt
+ j1 := auxIntToInt8(s1.AuxInt)
x1 := s1.Args[0]
if x1.Op != OpAMD64MOVBload {
continue
}
- i := x1.AuxInt
- s := x1.Aux
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p1 := x1.Args[0]
or := v_1
if s0.Op != OpAMD64SHLLconst {
continue
}
- j0 := s0.AuxInt
+ j0 := auxIntToInt8(s0.AuxInt)
x0 := s0.Args[0]
- if x0.Op != OpAMD64MOVBload || x0.AuxInt != i || x0.Aux != s {
+ if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
- v1.AuxInt = j0
+ v1.AuxInt = int8ToAuxInt(j0)
v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
- v2.AuxInt = i
- v2.Aux = s
+ v2.AuxInt = int32ToAuxInt(i)
+ v2.Aux = symToAux(s)
v2.AddArg2(p0, mem)
v1.AddArg(v2)
v0.AddArg2(v1, y)
if x1.Op != OpAMD64MOVBload {
continue
}
- i1 := x1.AuxInt
- s := x1.Aux
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p := x1.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
continue
}
x0 := sh.Args[0]
if x0.Op != OpAMD64MOVBload {
continue
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
v.copyOf(v0)
- v0.AuxInt = 8
+ v0.AuxInt = int8ToAuxInt(8)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg2(p, mem)
v0.AddArg(v1)
return true
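// Note: ROLWconst [8] rotates a 16-bit value by 8 bits, i.e. swaps its two
// bytes (0xAABB -> 0xBBAA), so the pattern above recognizes a big-endian
// 16-bit load assembled from two byte loads and rewrites it as one
// MOVWload followed by a single byte swap.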
if x1.Op != OpAMD64MOVBload {
continue
}
- i := x1.AuxInt
- s := x1.Aux
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p1 := x1.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
continue
}
x0 := sh.Args[0]
- if x0.Op != OpAMD64MOVBload || x0.AuxInt != i || x0.Aux != s {
+ if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
v.copyOf(v0)
- v0.AuxInt = 8
+ v0.AuxInt = int8ToAuxInt(8)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
- v1.AuxInt = i
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
v1.AddArg2(p0, mem)
v0.AddArg(v1)
return true
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
r1 := v_0
- if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
continue
}
x1 := r1.Args[0]
if x1.Op != OpAMD64MOVWload {
continue
}
- i1 := x1.AuxInt
- s := x1.Aux
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p := x1.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
continue
}
r0 := sh.Args[0]
- if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
continue
}
x0 := r0.Args[0]
if x0.Op != OpAMD64MOVWload {
continue
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg2(p, mem)
v0.AddArg(v1)
return true
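// Note: similarly, two byte-swapped 16-bit loads joined by a shift form a
// big-endian 32-bit load, which is lowered here to one MOVLload plus a
// single BSWAPL.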
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
r1 := v_0
- if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
continue
}
x1 := r1.Args[0]
if x1.Op != OpAMD64MOVWload {
continue
}
- i := x1.AuxInt
- s := x1.Aux
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p1 := x1.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
continue
}
r0 := sh.Args[0]
- if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
continue
}
x0 := r0.Args[0]
- if x0.Op != OpAMD64MOVWload || x0.AuxInt != i || x0.Aux != s {
+ if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
- v1.AuxInt = i
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
v1.AddArg2(p0, mem)
v0.AddArg(v1)
return true
if s0.Op != OpAMD64SHLLconst {
continue
}
- j0 := s0.AuxInt
+ j0 := auxIntToInt8(s0.AuxInt)
x0 := s0.Args[0]
if x0.Op != OpAMD64MOVBload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
or := v_1
if s1.Op != OpAMD64SHLLconst {
continue
}
- j1 := s1.AuxInt
+ j1 := auxIntToInt8(s1.AuxInt)
x1 := s1.Args[0]
if x1.Op != OpAMD64MOVBload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
- v1.AuxInt = j1
+ v1.AuxInt = int8ToAuxInt(j1)
v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
- v2.AuxInt = 8
+ v2.AuxInt = int8ToAuxInt(8)
v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
- v3.AuxInt = i0
- v3.Aux = s
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
v3.AddArg2(p, mem)
v2.AddArg(v3)
v1.AddArg(v2)
if s0.Op != OpAMD64SHLLconst {
continue
}
- j0 := s0.AuxInt
+ j0 := auxIntToInt8(s0.AuxInt)
x0 := s0.Args[0]
if x0.Op != OpAMD64MOVBload {
continue
}
- i := x0.AuxInt
- s := x0.Aux
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p0 := x0.Args[0]
or := v_1
if s1.Op != OpAMD64SHLLconst {
continue
}
- j1 := s1.AuxInt
+ j1 := auxIntToInt8(s1.AuxInt)
x1 := s1.Args[0]
- if x1.Op != OpAMD64MOVBload || x1.AuxInt != i || x1.Aux != s {
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
- v1.AuxInt = j1
+ v1.AuxInt = int8ToAuxInt(j1)
v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
- v2.AuxInt = 8
+ v2.AuxInt = int8ToAuxInt(8)
v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
- v3.AuxInt = i
- v3.Aux = s
+ v3.AuxInt = int32ToAuxInt(i)
+ v3.Aux = symToAux(s)
v3.AddArg2(p0, mem)
v2.AddArg(v3)
v1.AddArg(v2)
if x0.Op != OpAMD64MOVBload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
continue
}
x1 := sh.Args[0]
if x1.Op != OpAMD64MOVBload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
v.copyOf(v0)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg2(p, mem)
return true
}
if x0.Op != OpAMD64MOVBload {
continue
}
- i := x0.AuxInt
- s := x0.Aux
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p0 := x0.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
continue
}
x1 := sh.Args[0]
- if x1.Op != OpAMD64MOVBload || x1.AuxInt != i || x1.Aux != s {
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
v.copyOf(v0)
- v0.AuxInt = i
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
v0.AddArg2(p0, mem)
return true
}
if x0.Op != OpAMD64MOVWload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
continue
}
x1 := sh.Args[0]
if x1.Op != OpAMD64MOVWload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
v.copyOf(v0)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg2(p, mem)
return true
}
if x0.Op != OpAMD64MOVWload {
continue
}
- i := x0.AuxInt
- s := x0.Aux
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p0 := x0.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
continue
}
x1 := sh.Args[0]
- if x1.Op != OpAMD64MOVWload || x1.AuxInt != i || x1.Aux != s {
+ if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
v.copyOf(v0)
- v0.AuxInt = i
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
v0.AddArg2(p0, mem)
return true
}
if x0.Op != OpAMD64MOVLload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
continue
}
x1 := sh.Args[0]
if x1.Op != OpAMD64MOVLload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
v.copyOf(v0)
- v0.AuxInt = i0
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
v0.AddArg2(p, mem)
return true
}
if x0.Op != OpAMD64MOVLload {
continue
}
- i := x0.AuxInt
- s := x0.Aux
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p0 := x0.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
continue
}
x1 := sh.Args[0]
- if x1.Op != OpAMD64MOVLload || x1.AuxInt != i || x1.Aux != s {
+ if x1.Op != OpAMD64MOVLload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
v.copyOf(v0)
- v0.AuxInt = i
- v0.Aux = s
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
v0.AddArg2(p0, mem)
return true
}
if s1.Op != OpAMD64SHLQconst {
continue
}
- j1 := s1.AuxInt
+ j1 := auxIntToInt8(s1.AuxInt)
x1 := s1.Args[0]
if x1.Op != OpAMD64MOVBload {
continue
}
- i1 := x1.AuxInt
- s := x1.Aux
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p := x1.Args[0]
or := v_1
if s0.Op != OpAMD64SHLQconst {
continue
}
- j0 := s0.AuxInt
+ j0 := auxIntToInt8(s0.AuxInt)
x0 := s0.Args[0]
if x0.Op != OpAMD64MOVBload {
continue
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j0
+ v1.AuxInt = int8ToAuxInt(j0)
v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
- v2.AuxInt = i0
- v2.Aux = s
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
v2.AddArg2(p, mem)
v1.AddArg(v2)
v0.AddArg2(v1, y)
if s1.Op != OpAMD64SHLQconst {
continue
}
- j1 := s1.AuxInt
+ j1 := auxIntToInt8(s1.AuxInt)
x1 := s1.Args[0]
if x1.Op != OpAMD64MOVBload {
continue
}
- i := x1.AuxInt
- s := x1.Aux
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p1 := x1.Args[0]
or := v_1
if s0.Op != OpAMD64SHLQconst {
continue
}
- j0 := s0.AuxInt
+ j0 := auxIntToInt8(s0.AuxInt)
x0 := s0.Args[0]
- if x0.Op != OpAMD64MOVBload || x0.AuxInt != i || x0.Aux != s {
+ if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j0
+ v1.AuxInt = int8ToAuxInt(j0)
v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
- v2.AuxInt = i
- v2.Aux = s
+ v2.AuxInt = int32ToAuxInt(i)
+ v2.Aux = symToAux(s)
v2.AddArg2(p0, mem)
v1.AddArg(v2)
v0.AddArg2(v1, y)
if s1.Op != OpAMD64SHLQconst {
continue
}
- j1 := s1.AuxInt
+ j1 := auxIntToInt8(s1.AuxInt)
x1 := s1.Args[0]
if x1.Op != OpAMD64MOVWload {
continue
}
- i1 := x1.AuxInt
- s := x1.Aux
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p := x1.Args[0]
or := v_1
if s0.Op != OpAMD64SHLQconst {
continue
}
- j0 := s0.AuxInt
+ j0 := auxIntToInt8(s0.AuxInt)
x0 := s0.Args[0]
if x0.Op != OpAMD64MOVWload {
continue
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j0
+ v1.AuxInt = int8ToAuxInt(j0)
v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
- v2.AuxInt = i0
- v2.Aux = s
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
v2.AddArg2(p, mem)
v1.AddArg(v2)
v0.AddArg2(v1, y)
if s1.Op != OpAMD64SHLQconst {
continue
}
- j1 := s1.AuxInt
+ j1 := auxIntToInt8(s1.AuxInt)
x1 := s1.Args[0]
if x1.Op != OpAMD64MOVWload {
continue
}
- i := x1.AuxInt
- s := x1.Aux
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p1 := x1.Args[0]
or := v_1
if s0.Op != OpAMD64SHLQconst {
continue
}
- j0 := s0.AuxInt
+ j0 := auxIntToInt8(s0.AuxInt)
x0 := s0.Args[0]
- if x0.Op != OpAMD64MOVWload || x0.AuxInt != i || x0.Aux != s {
+ if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j0
+ v1.AuxInt = int8ToAuxInt(j0)
v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
- v2.AuxInt = i
- v2.Aux = s
+ v2.AuxInt = int32ToAuxInt(i)
+ v2.Aux = symToAux(s)
v2.AddArg2(p0, mem)
v1.AddArg(v2)
v0.AddArg2(v1, y)
if x1.Op != OpAMD64MOVBload {
continue
}
- i1 := x1.AuxInt
- s := x1.Aux
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p := x1.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
continue
}
x0 := sh.Args[0]
if x0.Op != OpAMD64MOVBload {
continue
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
v.copyOf(v0)
- v0.AuxInt = 8
+ v0.AuxInt = int8ToAuxInt(8)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg2(p, mem)
v0.AddArg(v1)
return true
if x1.Op != OpAMD64MOVBload {
continue
}
- i := x1.AuxInt
- s := x1.Aux
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p1 := x1.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
continue
}
x0 := sh.Args[0]
- if x0.Op != OpAMD64MOVBload || x0.AuxInt != i || x0.Aux != s {
+ if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
v.copyOf(v0)
- v0.AuxInt = 8
+ v0.AuxInt = int8ToAuxInt(8)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
- v1.AuxInt = i
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
v1.AddArg2(p0, mem)
v0.AddArg(v1)
return true
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
r1 := v_0
- if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
continue
}
x1 := r1.Args[0]
if x1.Op != OpAMD64MOVWload {
continue
}
- i1 := x1.AuxInt
- s := x1.Aux
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p := x1.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
continue
}
r0 := sh.Args[0]
- if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
continue
}
x0 := r0.Args[0]
if x0.Op != OpAMD64MOVWload {
continue
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg2(p, mem)
v0.AddArg(v1)
return true
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
r1 := v_0
- if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
continue
}
x1 := r1.Args[0]
if x1.Op != OpAMD64MOVWload {
continue
}
- i := x1.AuxInt
- s := x1.Aux
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p1 := x1.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
continue
}
r0 := sh.Args[0]
- if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
continue
}
x0 := r0.Args[0]
- if x0.Op != OpAMD64MOVWload || x0.AuxInt != i || x0.Aux != s {
+ if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
- v1.AuxInt = i
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
v1.AddArg2(p0, mem)
v0.AddArg(v1)
return true
if x1.Op != OpAMD64MOVLload {
continue
}
- i1 := x1.AuxInt
- s := x1.Aux
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p := x1.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
continue
}
r0 := sh.Args[0]
if r0.Op != OpAMD64BSWAPL {
continue
}
x0 := r0.Args[0]
if x0.Op != OpAMD64MOVLload {
continue
}
- i0 := x0.AuxInt
- if x0.Aux != s {
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg2(p, mem)
v0.AddArg(v1)
return true
if x1.Op != OpAMD64MOVLload {
continue
}
- i := x1.AuxInt
- s := x1.Aux
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
mem := x1.Args[1]
p1 := x1.Args[0]
sh := v_1
- if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
continue
}
r0 := sh.Args[0]
if r0.Op != OpAMD64BSWAPL {
continue
}
x0 := r0.Args[0]
- if x0.Op != OpAMD64MOVLload || x0.AuxInt != i || x0.Aux != s {
+ if x0.Op != OpAMD64MOVLload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
continue
}
_ = x0.Args[1]
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
- v1.AuxInt = i
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
v1.AddArg2(p0, mem)
v0.AddArg(v1)
return true
if s0.Op != OpAMD64SHLQconst {
continue
}
- j0 := s0.AuxInt
+ j0 := auxIntToInt8(s0.AuxInt)
x0 := s0.Args[0]
if x0.Op != OpAMD64MOVBload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
or := v_1
if s1.Op != OpAMD64SHLQconst {
continue
}
- j1 := s1.AuxInt
+ j1 := auxIntToInt8(s1.AuxInt)
x1 := s1.Args[0]
if x1.Op != OpAMD64MOVBload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j1
+ v1.AuxInt = int8ToAuxInt(j1)
v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
- v2.AuxInt = 8
+ v2.AuxInt = int8ToAuxInt(8)
v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
- v3.AuxInt = i0
- v3.Aux = s
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
v3.AddArg2(p, mem)
v2.AddArg(v3)
v1.AddArg(v2)
if s0.Op != OpAMD64SHLQconst {
continue
}
- j0 := s0.AuxInt
+ j0 := auxIntToInt8(s0.AuxInt)
x0 := s0.Args[0]
if x0.Op != OpAMD64MOVBload {
continue
}
- i := x0.AuxInt
- s := x0.Aux
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p0 := x0.Args[0]
or := v_1
if s1.Op != OpAMD64SHLQconst {
continue
}
- j1 := s1.AuxInt
+ j1 := auxIntToInt8(s1.AuxInt)
x1 := s1.Args[0]
- if x1.Op != OpAMD64MOVBload || x1.AuxInt != i || x1.Aux != s {
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j1
+ v1.AuxInt = int8ToAuxInt(j1)
v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
- v2.AuxInt = 8
+ v2.AuxInt = int8ToAuxInt(8)
v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
- v3.AuxInt = i
- v3.Aux = s
+ v3.AuxInt = int32ToAuxInt(i)
+ v3.Aux = symToAux(s)
v3.AddArg2(p0, mem)
v2.AddArg(v3)
v1.AddArg(v2)
if s0.Op != OpAMD64SHLQconst {
continue
}
- j0 := s0.AuxInt
+ j0 := auxIntToInt8(s0.AuxInt)
r0 := s0.Args[0]
- if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
continue
}
x0 := r0.Args[0]
if x0.Op != OpAMD64MOVWload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
or := v_1
if s1.Op != OpAMD64SHLQconst {
continue
}
- j1 := s1.AuxInt
+ j1 := auxIntToInt8(s1.AuxInt)
r1 := s1.Args[0]
- if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
continue
}
x1 := r1.Args[0]
if x1.Op != OpAMD64MOVWload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j1
+ v1.AuxInt = int8ToAuxInt(j1)
v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
- v3.AuxInt = i0
- v3.Aux = s
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
v3.AddArg2(p, mem)
v2.AddArg(v3)
v1.AddArg(v2)
if s0.Op != OpAMD64SHLQconst {
continue
}
- j0 := s0.AuxInt
+ j0 := auxIntToInt8(s0.AuxInt)
r0 := s0.Args[0]
- if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
continue
}
x0 := r0.Args[0]
if x0.Op != OpAMD64MOVWload {
continue
}
- i := x0.AuxInt
- s := x0.Aux
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p0 := x0.Args[0]
or := v_1
if s1.Op != OpAMD64SHLQconst {
continue
}
- j1 := s1.AuxInt
+ j1 := auxIntToInt8(s1.AuxInt)
r1 := s1.Args[0]
- if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
continue
}
x1 := r1.Args[0]
- if x1.Op != OpAMD64MOVWload || x1.AuxInt != i || x1.Aux != s {
+ if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j1
+ v1.AuxInt = int8ToAuxInt(j1)
v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
- v3.AuxInt = i
- v3.Aux = s
+ v3.AuxInt = int32ToAuxInt(i)
+ v3.Aux = symToAux(s)
v3.AddArg2(p0, mem)
v2.AddArg(v3)
v1.AddArg(v2)
v_0 := v.Args[0]
b := v.Block
// match: (TESTB (MOVLconst [c]) x)
- // result: (TESTBconst [c] x)
+ // result: (TESTBconst [int8(c)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpAMD64MOVLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_1
v.reset(OpAMD64TESTBconst)
- v.AuxInt = c
+ v.AuxInt = int8ToAuxInt(int8(c))
v.AddArg(x)
return true
}
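// Note: TESTB only inspects the low 8 bits of its operands, so the int8(c)
// truncation above is safe; e.g. c = 0x1FF becomes int8(-1), and
// TESTBconst [-1] x tests exactly the same byte as TESTB (MOVLconst
// [0x1FF]) x would.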
// cond: x.Op != OpAMD64MOVLconst
// result: (TESTB x x)
for {
- if v.AuxInt != -1 {
+ if auxIntToInt8(v.AuxInt) != -1 {
break
}
x := v_0
if v_0.Op != OpAMD64MOVLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_1
v.reset(OpAMD64TESTLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
// cond: x.Op != OpAMD64MOVLconst
// result: (TESTL x x)
for {
- if v.AuxInt != -1 {
+ if auxIntToInt32(v.AuxInt) != -1 {
break
}
x := v_0
b := v.Block
// match: (TESTQ (MOVQconst [c]) x)
// cond: is32Bit(c)
- // result: (TESTQconst [c] x)
+ // result: (TESTQconst [int32(c)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpAMD64MOVQconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
if !(is32Bit(c)) {
continue
}
v.reset(OpAMD64TESTQconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg(x)
return true
}
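// Note: the is32Bit(c) guard means int32(c) loses no information here:
// the TESTQconst immediate is sign-extended to 64 bits by the hardware,
// so 64-bit constants outside that range keep the two-register TESTQ form.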
// cond: x.Op != OpAMD64MOVQconst
// result: (TESTQ x x)
for {
- if v.AuxInt != -1 {
+ if auxIntToInt32(v.AuxInt) != -1 {
break
}
x := v_0
v_0 := v.Args[0]
b := v.Block
// match: (TESTW (MOVLconst [c]) x)
- // result: (TESTWconst [c] x)
+ // result: (TESTWconst [int16(c)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpAMD64MOVLconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_1
v.reset(OpAMD64TESTWconst)
- v.AuxInt = c
+ v.AuxInt = int16ToAuxInt(int16(c))
v.AddArg(x)
return true
}
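// Note: as with TESTB above, TESTW only examines the low 16 bits, so the
// int16(c) truncation preserves the flags the test computes.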
// cond: x.Op != OpAMD64MOVLconst
// result: (TESTW x x)
for {
- if v.AuxInt != -1 {
+ if auxIntToInt16(v.AuxInt) != -1 {
break
}
x := v_0