(Not x) -> (XORBconst [1] x)
-(OffPtr [off] ptr) -> (ADDQconst [off] ptr)
+(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
+(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
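
The is32Bit guards introduced throughout this change are presumably the small helper in rewrite.go, which reports whether a constant still fits in a sign-extended 32-bit immediate/displacement. A minimal sketch, assuming that definition:

func is32Bit(n int64) bool {
	// n fits in 32 bits exactly when it survives a round trip through int32.
	return n == int64(int32(n))
}
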
(Const8 [val]) -> (MOVBconst [val])
(Const16 [val]) -> (MOVWconst [val])
(ADDQ x (ADDQconst [c] y)) -> (LEAQ1 [c] x y)
// fold ADDQ into LEAQ
-(ADDQconst [c] (LEAQ [d] {s} x)) -> (LEAQ [c+d] {s} x)
-(LEAQ [c] {s} (ADDQconst [d] x)) -> (LEAQ [c+d] {s} x)
+(ADDQconst [c] (LEAQ [d] {s} x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
+(LEAQ [c] {s} (ADDQconst [d] x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x)
(LEAQ [c] {s} (ADDQ x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
(ADDQ x (LEAQ [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
(ADDQ (LEAQ [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
// fold ADDQconst into LEAQx
-(ADDQconst [c] (LEAQ1 [d] {s} x y)) -> (LEAQ1 [c+d] {s} x y)
-(ADDQconst [c] (LEAQ2 [d] {s} x y)) -> (LEAQ2 [c+d] {s} x y)
-(ADDQconst [c] (LEAQ4 [d] {s} x y)) -> (LEAQ4 [c+d] {s} x y)
-(ADDQconst [c] (LEAQ8 [d] {s} x y)) -> (LEAQ8 [c+d] {s} x y)
-(LEAQ1 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
-(LEAQ1 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
-(LEAQ2 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y)
-(LEAQ2 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y)
-(LEAQ4 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y)
-(LEAQ4 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ4 [c+4*d] {s} x y)
-(LEAQ8 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y)
-(LEAQ8 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y)
+(ADDQconst [c] (LEAQ1 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ1 [c+d] {s} x y)
+(ADDQconst [c] (LEAQ2 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ2 [c+d] {s} x y)
+(ADDQconst [c] (LEAQ4 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ4 [c+d] {s} x y)
+(ADDQconst [c] (LEAQ8 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ8 [c+d] {s} x y)
+(LEAQ1 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
+(LEAQ1 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+d) && y.Op != OpSB -> (LEAQ1 [c+d] {s} x y)
+(LEAQ2 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y)
+(LEAQ2 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y)
+(LEAQ4 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y)
+(LEAQ4 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAQ4 [c+4*d] {s} x y)
+(LEAQ8 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y)
+(LEAQ8 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y)
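
A note on the scaled conditions above: when the ADDQconst sits on the index operand, its constant is multiplied by the scale before it reaches the displacement, so the guard must bound the scaled sum. For example, (LEAQ8 [c] {s} x (ADDQconst [d] y)) addresses x + 8*(y+d) + c = x + 8*y + (c+8*d), hence is32Bit(c+8*d) rather than is32Bit(c+d).
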
// fold shifts into LEAQx
(LEAQ1 [c] {s} x (SHLQconst [1] y)) -> (LEAQ2 [c] {s} x y)
// Fold extensions and ANDs together.
(MOVBQZX (ANDBconst [c] x)) -> (ANDQconst [c & 0xff] x)
(MOVWQZX (ANDWconst [c] x)) -> (ANDQconst [c & 0xffff] x)
-(MOVLQZX (ANDLconst [c] x)) -> (ANDQconst [c & 0xffffffff] x)
+(MOVLQZX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x)
(MOVBQSX (ANDBconst [c] x)) && c & 0x80 == 0 -> (ANDQconst [c & 0x7f] x)
(MOVWQSX (ANDWconst [c] x)) && c & 0x8000 == 0 -> (ANDQconst [c & 0x7fff] x)
(MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x)
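
The sign-bit conditions above do two things at once: they ensure the value left by the AND is non-negative, so sign- and zero-extension agree, and they keep the reduced mask (c & 0x7f / 0x7fff / 0x7fffffff) a non-negative immediate. The latter matters because ANDQconst's 32-bit immediate is sign-extended to 64 bits (an assumption about the encoding): a mask with bit 31 set would also set bits 32-63 and could no longer stand in for a zero-extension.
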
// Note: folding ADDQconst offsets into loads and stores is not always a win, because if not all uses of
// the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
// Nevertheless, let's do it!
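
Concretely, the new is32Bit guard matters when both offsets are large: with off1 = 0x7fffffff and off2 = 1, the folded displacement 0x80000000 no longer fits in a signed 32-bit field, so the rewrite must not fire; the rules below therefore only combine offsets when the sum stays encodable.
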
-(MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVQload [addOff(off1, off2)] {sym} ptr mem)
-(MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVLload [addOff(off1, off2)] {sym} ptr mem)
-(MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVWload [addOff(off1, off2)] {sym} ptr mem)
-(MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVBload [addOff(off1, off2)] {sym} ptr mem)
-(MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVSSload [addOff(off1, off2)] {sym} ptr mem)
-(MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVSDload [addOff(off1, off2)] {sym} ptr mem)
-(MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) -> (MOVOload [addOff(off1, off2)] {sym} ptr mem)
-
-(MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVQstore [addOff(off1, off2)] {sym} ptr val mem)
-(MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVLstore [addOff(off1, off2)] {sym} ptr val mem)
-(MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVWstore [addOff(off1, off2)] {sym} ptr val mem)
-(MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVBstore [addOff(off1, off2)] {sym} ptr val mem)
-(MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVSSstore [addOff(off1, off2)] {sym} ptr val mem)
-(MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVSDstore [addOff(off1, off2)] {sym} ptr val mem)
-(MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) -> (MOVOstore [addOff(off1, off2)] {sym} ptr val mem)
+(MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {sym} ptr mem)
+(MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
+(MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem)
+(MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem)
+(MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVOload [off1+off2] {sym} ptr mem)
+
+(MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {sym} ptr val mem)
+(MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem)
+(MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem)
+(MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVOstore [off1+off2] {sym} ptr val mem)
// Fold constants into stores.
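
The storeconst rules below pack two constants into a single AuxInt via ValAndOff (presumably the ssa package's helper type, with the stored value in the high 32 bits and the offset in the low 32 bits): validValAndOff(c,off) checks that both halves fit in 32 bits, and ValAndOff(sc).add(off) adjusts only the offset half, with canAdd guarding that the new offset still fits.
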
(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
// We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
// what variables are being read/written by the ops.
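
For example (illustrative): once a load of the form (MOVQload [off1] {sym1} (LEAQ [off2] {x} SP) mem) is folded into (MOVQload [off1+off2] {x} SP mem) (with sym1 empty, so the merge keeps x), the load itself names the stack variable x, and the liveness pass sees the read at the load rather than only at the LEAQ that computed its address.
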
-(MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVQload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-(MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVLload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-(MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVWload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-(MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVBload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-(MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVSSload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-(MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVSDload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-(MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVOload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-
-(MOVBQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVBQZXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-(MOVWQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVWQZXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-(MOVLQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVLQZXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-(MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVBQSXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-(MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVWQSXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-(MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
- (MOVLQSXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
-
-
-(MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
- (MOVQstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
-(MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
- (MOVLstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
-(MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
- (MOVWstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
-(MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
- (MOVBstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
-(MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
- (MOVSSstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
-(MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
- (MOVSDstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
-(MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
- (MOVOstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
+(MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVBQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBQZXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWQZXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVLQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVLQZXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+
+(MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
// generating indexed loads and stores
-(MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
- (MOVBloadidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
- (MOVWloadidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
- (MOVLloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
- (MOVQloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
- (MOVSSloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
- (MOVSDloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
-
-(MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
- (MOVBstoreidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
- (MOVWstoreidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
- (MOVLstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
- (MOVQstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
- (MOVSSstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
- (MOVSDstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+
+(MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVBload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem)
(MOVBstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
(MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
// fold LEAQs together
-(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && canMergeSym(sym1, sym2) ->
- (LEAQ [addOff(off1,off2)] {mergeSym(sym1,sym2)} x)
+(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
// LEAQ into LEAQ1
-(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
- (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
-(LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
- (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
+ (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
+ (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ1 into LEAQ
-(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && canMergeSym(sym1, sym2) ->
- (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ into LEAQ[248]
-(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
- (LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
-(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
- (LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
-(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
- (LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
+ (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
+ (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
+ (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ[248] into LEAQ
-(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && canMergeSym(sym1, sym2) ->
- (LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
-(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && canMergeSym(sym1, sym2) ->
- (LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
-(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && canMergeSym(sym1, sym2) ->
- (LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// lower Zero instructions with word sizes
(Zero [0] _ mem) -> mem
(ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [c+d])
(ADDWconst [c] (MOVWconst [d])) -> (MOVWconst [c+d])
(ADDBconst [c] (MOVBconst [d])) -> (MOVBconst [c+d])
-(ADDQconst [c] (ADDQconst [d] x)) -> (ADDQconst [c+d] x)
+(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x)
(ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [c+d] x)
(ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [c+d] x)
(ADDBconst [c] (ADDBconst [d] x)) -> (ADDBconst [c+d] x)
(SUBLconst [c] (MOVLconst [d])) -> (MOVLconst [d-c])
(SUBWconst [c] (MOVWconst [d])) -> (MOVWconst [d-c])
(SUBBconst [c] (MOVBconst [d])) -> (MOVBconst [d-c])
-(SUBQconst [c] (SUBQconst [d] x)) -> (ADDQconst [-c-d] x)
+(SUBQconst [c] (SUBQconst [d] x)) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
(SUBLconst [c] (SUBLconst [d] x)) -> (ADDLconst [-c-d] x)
(SUBWconst [c] (SUBWconst [d] x)) -> (ADDWconst [-c-d] x)
(SUBBconst [c] (SUBBconst [d] x)) -> (ADDBconst [-c-d] x)
return true
}
// match: (ADDQconst [c] (LEAQ [d] {s} x))
- // cond:
+ // cond: is32Bit(c+d)
// result: (LEAQ [c+d] {s} x)
for {
c := v.AuxInt
d := v.Args[0].AuxInt
s := v.Args[0].Aux
x := v.Args[0].Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64LEAQ)
v.AuxInt = c + d
v.Aux = s
return true
}
// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
- // cond:
+ // cond: is32Bit(c+d)
// result: (LEAQ1 [c+d] {s} x y)
for {
c := v.AuxInt
s := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64LEAQ1)
v.AuxInt = c + d
v.Aux = s
return true
}
// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
- // cond:
+ // cond: is32Bit(c+d)
// result: (LEAQ2 [c+d] {s} x y)
for {
c := v.AuxInt
s := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64LEAQ2)
v.AuxInt = c + d
v.Aux = s
return true
}
// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
- // cond:
+ // cond: is32Bit(c+d)
// result: (LEAQ4 [c+d] {s} x y)
for {
c := v.AuxInt
s := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64LEAQ4)
v.AuxInt = c + d
v.Aux = s
return true
}
// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
- // cond:
+ // cond: is32Bit(c+d)
// result: (LEAQ8 [c+d] {s} x y)
for {
c := v.AuxInt
s := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64LEAQ8)
v.AuxInt = c + d
v.Aux = s
return true
}
// match: (ADDQconst [c] (ADDQconst [d] x))
- // cond:
+ // cond: is32Bit(c+d)
// result: (ADDQconst [c+d] x)
for {
c := v.AuxInt
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64ADDQconst)
v.AuxInt = c + d
v.AddArg(x)
b := v.Block
_ = b
// match: (LEAQ [c] {s} (ADDQconst [d] x))
- // cond:
+ // cond: is32Bit(c+d)
// result: (LEAQ [c+d] {s} x)
for {
c := v.AuxInt
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64LEAQ)
v.AuxInt = c + d
v.Aux = s
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
- // cond: canMergeSym(sym1, sym2)
- // result: (LEAQ [addOff(off1,off2)] {mergeSym(sym1,sym2)} x)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
for {
off1 := v.AuxInt
sym1 := v.Aux
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
- // cond: canMergeSym(sym1, sym2)
- // result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ1)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
- // cond: canMergeSym(sym1, sym2)
- // result: (LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ2)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
- // cond: canMergeSym(sym1, sym2)
- // result: (LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ4)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
- // cond: canMergeSym(sym1, sym2)
- // result: (LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ8)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
b := v.Block
_ = b
// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
- // cond: x.Op != OpSB
+ // cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAQ1 [c+d] {s} x y)
for {
c := v.AuxInt
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
y := v.Args[1]
- if !(x.Op != OpSB) {
+ if !(is32Bit(c+d) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ1)
return true
}
// match: (LEAQ1 [c] {s} x (ADDQconst [d] y))
- // cond: y.Op != OpSB
+ // cond: is32Bit(c+d) && y.Op != OpSB
// result: (LEAQ1 [c+d] {s} x y)
for {
c := v.AuxInt
}
d := v.Args[1].AuxInt
y := v.Args[1].Args[0]
- if !(y.Op != OpSB) {
+ if !(is32Bit(c+d) && y.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ1)
return true
}
// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
- // cond: canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[1]
- if !(canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ1)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y))
- // cond: canMergeSym(sym1, sym2) && y.Op != OpSB
- // result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
+ // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
off2 := v.Args[1].AuxInt
sym2 := v.Args[1].Aux
y := v.Args[1].Args[0]
- if !(canMergeSym(sym1, sym2) && y.Op != OpSB) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ1)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
b := v.Block
_ = b
// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
- // cond: x.Op != OpSB
+ // cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAQ2 [c+d] {s} x y)
for {
c := v.AuxInt
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
y := v.Args[1]
- if !(x.Op != OpSB) {
+ if !(is32Bit(c+d) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ2)
return true
}
// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
- // cond: y.Op != OpSB
+ // cond: is32Bit(c+2*d) && y.Op != OpSB
// result: (LEAQ2 [c+2*d] {s} x y)
for {
c := v.AuxInt
}
d := v.Args[1].AuxInt
y := v.Args[1].Args[0]
- if !(y.Op != OpSB) {
+ if !(is32Bit(c+2*d) && y.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ2)
return true
}
// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
- // cond: canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[1]
- if !(canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ2)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
b := v.Block
_ = b
// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
- // cond: x.Op != OpSB
+ // cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAQ4 [c+d] {s} x y)
for {
c := v.AuxInt
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
y := v.Args[1]
- if !(x.Op != OpSB) {
+ if !(is32Bit(c+d) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ4)
return true
}
// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
- // cond: y.Op != OpSB
+ // cond: is32Bit(c+4*d) && y.Op != OpSB
// result: (LEAQ4 [c+4*d] {s} x y)
for {
c := v.AuxInt
}
d := v.Args[1].AuxInt
y := v.Args[1].Args[0]
- if !(y.Op != OpSB) {
+ if !(is32Bit(c+4*d) && y.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ4)
return true
}
// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
- // cond: canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[1]
- if !(canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ4)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
b := v.Block
_ = b
// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
- // cond: x.Op != OpSB
+ // cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAQ8 [c+d] {s} x y)
for {
c := v.AuxInt
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
y := v.Args[1]
- if !(x.Op != OpSB) {
+ if !(is32Bit(c+d) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ8)
return true
}
// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
- // cond: y.Op != OpSB
+ // cond: is32Bit(c+8*d) && y.Op != OpSB
// result: (LEAQ8 [c+8*d] {s} x y)
for {
c := v.AuxInt
}
d := v.Args[1].AuxInt
y := v.Args[1].Args[0]
- if !(y.Op != OpSB) {
+ if !(is32Bit(c+8*d) && y.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ8)
return true
}
// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
- // cond: canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[1]
- if !(canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ8)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
b := v.Block
_ = b
// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVBQSXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBQSXload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
b := v.Block
_ = b
// match: (MOVBQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVBQZXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBQZXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBQZXload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
- // cond:
- // result: (MOVBload [addOff(off1, off2)] {sym} ptr mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVBload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVBload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVBloadidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBloadidx1)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
return true
}
// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
- // cond:
- // result: (MOVBstore [addOff(off1, off2)] {sym} ptr val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVBstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
return true
}
// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVBstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
return true
}
// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVBstoreidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBstoreidx1)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
b := v.Block
_ = b
// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVLQSXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLQSXload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVLQZX (ANDLconst [c] x))
- // cond:
- // result: (ANDQconst [c & 0xffffffff] x)
+ // cond: c & 0x80000000 == 0
+ // result: (ANDQconst [c & 0x7fffffff] x)
for {
if v.Args[0].Op != OpAMD64ANDLconst {
break
}
c := v.Args[0].AuxInt
x := v.Args[0].Args[0]
+ if !(c&0x80000000 == 0) {
+ break
+ }
v.reset(OpAMD64ANDQconst)
- v.AuxInt = c & 0xffffffff
+ v.AuxInt = c & 0x7fffffff
v.AddArg(x)
return true
}
b := v.Block
_ = b
// match: (MOVLQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVLQZXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVLQZXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLQZXload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
- // cond:
- // result: (MOVLload [addOff(off1, off2)] {sym} ptr mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVLload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVLload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVLload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVLloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLloadidx4)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
return true
}
// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
- // cond:
- // result: (MOVLstore [addOff(off1, off2)] {sym} ptr val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVLstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVLstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
return true
}
// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVLstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
return true
}
// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVLstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLstoreidx4)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
b := v.Block
_ = b
// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
- // cond:
- // result: (MOVOload [addOff(off1, off2)] {sym} ptr mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVOload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVOload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVOload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVOload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
b := v.Block
_ = b
// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
- // cond:
- // result: (MOVOstore [addOff(off1, off2)] {sym} ptr val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVOstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVOstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
return true
}
// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVOstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVOstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
return true
}
// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
- // cond:
- // result: (MOVQload [addOff(off1, off2)] {sym} ptr mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVQload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVQload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVQload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVQload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVQloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVQloadidx8)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
b := v.Block
_ = b
// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
- // cond:
- // result: (MOVQstore [addOff(off1, off2)] {sym} ptr val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVQstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVQstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
return true
}
// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVQstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
return true
}
// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVQstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVQstoreidx8)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
b := v.Block
_ = b
// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
- // cond:
- // result: (MOVSDload [addOff(off1, off2)] {sym} ptr mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVSDload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVSDload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVSDload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSDload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVSDloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSDloadidx8)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
b := v.Block
_ = b
// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
- // cond:
- // result: (MOVSDstore [addOff(off1, off2)] {sym} ptr val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVSDstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
return true
}
// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVSDstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSDstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
return true
}
// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVSDstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSDstoreidx8)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
b := v.Block
_ = b
// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
- // cond:
- // result: (MOVSSload [addOff(off1, off2)] {sym} ptr mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVSSload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVSSload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVSSload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSSload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVSSloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSSloadidx4)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
b := v.Block
_ = b
// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
- // cond:
- // result: (MOVSSstore [addOff(off1, off2)] {sym} ptr val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVSSstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
return true
}
// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVSSstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSSstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
return true
}
// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVSSstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSSstoreidx4)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
b := v.Block
_ = b
// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVWQSXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWQSXload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
b := v.Block
_ = b
// match: (MOVWQZXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVWQZXload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWQZXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWQZXload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
- // cond:
- // result: (MOVWload [addOff(off1, off2)] {sym} ptr mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVWload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVWload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
sym2 := v.Args[0].Aux
base := v.Args[0].Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWload)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVWloadidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWloadidx2)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
return true
}
// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
- // cond:
- // result: (MOVWstore [addOff(off1, off2)] {sym} ptr val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
sym := v.Aux
ptr := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
v.reset(OpAMD64MOVWstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
return true
}
// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVWstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
base := v.Args[0].Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(val)
return true
}
// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVWstoreidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWstoreidx2)
- v.AuxInt = addOff(off1, off2)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
b := v.Block
_ = b
// match: (OffPtr [off] ptr)
- // cond:
+ // cond: is32Bit(off)
// result: (ADDQconst [off] ptr)
for {
off := v.AuxInt
ptr := v.Args[0]
+ if !(is32Bit(off)) {
+ break
+ }
v.reset(OpAMD64ADDQconst)
v.AuxInt = off
v.AddArg(ptr)
return true
}
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADDQ (MOVQconst [off]) ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0.AuxInt = off
+ v.AddArg(v0)
+ v.AddArg(ptr)
+ return true
+ }
return false
}
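
Taken together, the two OffPtr cases above give a two-tier lowering (illustrative, following the rules at the top of this change): a small offset becomes a single ADDQconst, while an offset outside the int32 range is first materialized into a register.

	// OffPtr [16] p     -> (ADDQconst [16] p)            // fits in an imm32
	// OffPtr [1<<33] p  -> (ADDQ (MOVQconst [1<<33]) p)  // needs a 64-bit constant
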
func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
return true
}
// match: (SUBQconst [c] (SUBQconst [d] x))
- // cond:
+ // cond: is32Bit(-c-d)
// result: (ADDQconst [-c-d] x)
for {
c := v.AuxInt
}
d := v.Args[0].AuxInt
x := v.Args[0].Args[0]
+ if !(is32Bit(-c - d)) {
+ break
+ }
v.reset(OpAMD64ADDQconst)
v.AuxInt = -c - d
v.AddArg(x)