(MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
// combine ADDQ into indexed loads and stores
-(MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
-(MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
-(MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
-(MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
-(MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
-(MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
-(MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem)
-(MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-(MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
-(MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
-(MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
-
-(MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
-(MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
-(MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
-(MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
-(MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
-
-(MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
-(MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
-(MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
-(MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
-(MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
-(MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
-(MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
-(MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-(MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
-(MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
-(MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
-
-(MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
-(MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
-(MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
-(MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
-(MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
-
-(MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+(MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+(MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
+(MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+(MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
+(MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
+(MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem)
+(MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
+(MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
+(MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
+(MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
+
+(MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
+(MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
+(MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
+(MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
+(MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
+
+(MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+(MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+(MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+2*d) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
+(MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+(MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
+(MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
+(MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
+(MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
+(MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
+(MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
+(MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
+
+(MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+2*d) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
+(MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
+(MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
+(MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
+(MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
+
+(MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(2*c) ->
(MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
-(MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(4*c) ->
(MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
-(MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(8*c) ->
(MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
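
Annotation: the new guards above all reduce to the same question: does the folded displacement still fit in the 32-bit field that these AuxInts must now respect? For reference, a minimal sketch of the ssa-package helpers the conditions call. The ValAndOff part assumes the usual packing (value in the high 32 bits, offset in the low 32); it is an approximation of the real type, not a copy of the source.

// is32Bit reports whether n fits in a signed 32-bit field.
func is32Bit(n int64) bool {
	return n == int64(int32(n))
}

// ValAndOff packs a constant value and a pointer offset into one AuxInt
// (sketch: value in the high 32 bits, offset in the low 32 bits).
type ValAndOff int64

// Off extracts the offset half, sign-extended.
func (x ValAndOff) Off() int64 { return int64(int32(x)) }

// canAdd reports whether the offset can grow by off without overflowing
// its 32-bit half; add (not shown here) then repacks the widened offset.
func (x ValAndOff) canAdd(off int64) bool {
	newoff := x.Off() + off
	return newoff == int64(int32(newoff))
}
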
// fold LEALs together
(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
-(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
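
Annotation: the LEAL merges get the same treatment because each addend can fit in 32 bits while the merged displacement off1+off2 does not, so the fold must check the sum. A runnable probe of that boundary case:

package main

import "fmt"

func is32Bit(n int64) bool { return n == int64(int32(n)) }

func main() {
	off1, off2 := int64(1<<30), int64(1<<30)
	fmt.Println(is32Bit(off1), is32Bit(off2)) // true true
	fmt.Println(is32Bit(off1 + off2))         // false: the merge must not fire
}
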
// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
// filled by sign-extending the used portion. Users of AuxInt which interpret
// AuxInt as unsigned (e.g. shifts) must be careful.
+// - All SymOff opcodes require their offset to fit in an int32.
// Suffixes encode the bit width of various instructions.
// Q (quad word) = 64 bit
// binary ops
{name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1
{name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
- {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int64", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
{name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, // arg0 + auxint
{name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
{name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
- {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+ {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
{name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
{name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
{name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
- {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
+ {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
{name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
{name: "HMULQ", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
{name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
- {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
{name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
{name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
{name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
- {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
{name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
{name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
{name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
- {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
{name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
{name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, // arg0 compare to arg1
- {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int64"}, // arg0 compare to auxint
+ {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
{name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
{name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, // arg0 compare to auxint
{name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint
{name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0
- {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int64"}, // (arg0 & auxint) compare to 0
+ {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
{name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
{name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0
{name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0
},
{
name: "ADDQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
clobberFlags: true,
asm: x86.AADDQ,
},
{
name: "SUBQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
},
{
name: "MULQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
},
{
name: "ANDQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
},
{
name: "ORQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
},
{
name: "XORQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
},
{
name: "CMPQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
asm: x86.ACMPQ,
reg: regInfo{
},
{
name: "TESTQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
asm: x86.ATESTQ,
reg: regInfo{
return true
}
// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVBload)
}
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
ptr := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
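
Annotation: every rewritten case in this generated file follows the same shape, one copy per operand order and per element scale. Distilled into a standalone helper for reading convenience (foldOffset is a name invented here, not part of the compiler; in the real generated code the scale is a constant baked into each case rather than a parameter):

// foldOffset reports the merged displacement for folding an ADDQconst [d]
// into an indexed op with AuxInt c and element scale 1, 2, 4, or 8,
// or ok=false when the fold must be skipped.
func foldOffset(c, d, scale int64) (merged int64, ok bool) {
	merged = c + scale*d
	if merged != int64(int32(merged)) { // the is32Bit test
		return 0, false // keep the ADDQconst; folding would overflow
	}
	return merged, true
}
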
// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
base := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVBstore)
}
func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVBstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
return true
}
// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVBstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
b := v.Block
_ = b
// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVBstoreidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVBstoreidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVLload)
return true
}
// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
ptr := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLloadidx1)
v.AuxInt = c + d
v.Aux = sym
}
func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLloadidx4)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+4*d)
// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + 4*d)) {
+ break
+ }
v.reset(OpAMD64MOVLloadidx4)
v.AuxInt = c + 4*d
v.Aux = sym
}
func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
base := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVLstore)
return true
}
// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
return true
}
// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
typ := &b.Func.Config.Types
_ = typ
// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreconstidx4)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
return true
}
// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(4*c)
// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
for {
x := v.AuxInt
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(4 * c)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreconstidx4)
v.AuxInt = ValAndOff(x).add(4 * c)
v.Aux = sym
return true
}
// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = c + d
v.Aux = sym
b := v.Block
_ = b
// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreidx4)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+4*d)
// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + 4*d)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreidx4)
v.AuxInt = c + 4*d
v.Aux = sym
return true
}
// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVQload)
return true
}
// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
ptr := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQloadidx1)
v.AuxInt = c + d
v.Aux = sym
}
func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool {
// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQloadidx8)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+8*d)
// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + 8*d)) {
+ break
+ }
v.reset(OpAMD64MOVQloadidx8)
v.AuxInt = c + 8*d
v.Aux = sym
return true
}
// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
base := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVQstore)
return true
}
// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
return true
}
// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreconstidx8)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
return true
}
// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(8*c)
// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
for {
x := v.AuxInt
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(8 * c)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreconstidx8)
v.AuxInt = ValAndOff(x).add(8 * c)
v.Aux = sym
return true
}
// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = c + d
v.Aux = sym
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreidx8)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+8*d)
// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + 8*d)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreidx8)
v.AuxInt = c + 8*d
v.Aux = sym
return true
}
// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSDloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSDloadidx1)
v.AuxInt = c + d
v.Aux = sym
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool {
// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSDloadidx8)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+8*d)
// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + 8*d)) {
+ break
+ }
v.reset(OpAMD64MOVSDloadidx8)
v.AuxInt = c + 8*d
v.Aux = sym
return true
}
// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSDstoreidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSDstoreidx1)
v.AuxInt = c + d
v.Aux = sym
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool {
// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSDstoreidx8)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+8*d)
// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + 8*d)) {
+ break
+ }
v.reset(OpAMD64MOVSDstoreidx8)
v.AuxInt = c + 8*d
v.Aux = sym
return true
}
// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSSloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSSloadidx1)
v.AuxInt = c + d
v.Aux = sym
}
func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSSloadidx4)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+4*d)
// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + 4*d)) {
+ break
+ }
v.reset(OpAMD64MOVSSloadidx4)
v.AuxInt = c + 4*d
v.Aux = sym
return true
}
// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSSstoreidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSSstoreidx1)
v.AuxInt = c + d
v.Aux = sym
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSSstoreidx4)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+4*d)
// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + 4*d)) {
+ break
+ }
v.reset(OpAMD64MOVSSstoreidx4)
v.AuxInt = c + 4*d
v.Aux = sym
return true
}
// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVWload)
return true
}
// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
ptr := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWloadidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWloadidx1)
v.AuxInt = c + d
v.Aux = sym
}
func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool {
// match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWloadidx2)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+2*d)
// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
for {
c := v.AuxInt
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + 2*d)) {
+ break
+ }
v.reset(OpAMD64MOVWloadidx2)
v.AuxInt = c + 2*d
v.Aux = sym
}
func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool {
// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
base := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVWstore)
return true
}
// match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
return true
}
// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
b := v.Block
_ = b
// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreconstidx2)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
return true
}
// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(2*c)
// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
for {
x := v.AuxInt
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(2 * c)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreconstidx2)
v.AuxInt = ValAndOff(x).add(2 * c)
v.Aux = sym
return true
}
// match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = c + d
v.Aux = sym
b := v.Block
_ = b
// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreidx2)
v.AuxInt = c + d
v.Aux = sym
return true
}
// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+2*d)
// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
for {
c := v.AuxInt
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + 2*d)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreidx2)
v.AuxInt = c + 2*d
v.Aux = sym
return Yxxx
}
if ctxt.Arch.Family == sys.AMD64 {
+ // Offset must fit in a 32-bit signed field (or fit in a 32-bit unsigned field
+ // where the sign extension doesn't matter).
+ // Note: The latter happens only in assembly, for example crypto/sha1/sha1block_amd64.s.
+ if !(a.Offset == int64(int32(a.Offset)) ||
+ a.Offset == int64(uint32(a.Offset)) && p.As == ALEAL) {
+ return Yxxx
+ }
switch a.Name {
case obj.NAME_EXTERN, obj.NAME_STATIC, obj.NAME_GOTREF:
// Global variables can't use index registers and their
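
Annotation: this assembler check is the backstop behind the compiler-side guards above. oclass now classifies any AMD64 memory operand whose offset cannot round-trip through int32 as Yxxx (no valid operand class), turning a too-large offset into an assembly error instead of a silently truncated displacement; the ALEAL carve-out keeps hand-written assembly working where a 32-bit unsigned constant is used and the sign extension is immaterial. Probing the non-LEAL branch in isolation (offsetOK is a name invented here for the sketch):

package main

import "fmt"

// offsetOK mirrors the new non-LEAL branch of the asm6.go check.
func offsetOK(off int64) bool {
	return off == int64(int32(off))
}

func main() {
	fmt.Println(offsetOK(0x7fffffff))  // true: largest int32
	fmt.Println(offsetOK(0x80000000))  // false: now rejected as Yxxx
	fmt.Println(offsetOK(-0x80000000)) // true: smallest int32
}
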
--- /dev/null
+++ b/test/fixedbugs/issue21655.go
+// compile
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Make sure assembly offsets don't get too large.
+
+// To trigger issue21655, the index offset needs to be small
+// enough to fit into an int32 (to get rewritten to an ADDQconst)
+// but large enough to overflow an int32 after multiplying by the stride.
+
+package main
+
+func f1(a []int64, i int64) int64 {
+ return a[i+1<<30]
+}
+func f2(a []int32, i int64) int32 {
+ return a[i+1<<30]
+}
+func f3(a []int16, i int64) int16 {
+ return a[i+1<<30]
+}
+func f4(a []int8, i int64) int8 {
+ return a[i+1<<31]
+}
+func f5(a []float64, i int64) float64 {
+ return a[i+1<<30]
+}
+func f6(a []float32, i int64) float32 {
+ return a[i+1<<30]
+}
+
+// Note: Before the fix for issue 21655, f{1,2,5,6} made
+// the compiler crash. f3 silently generated the wrong
+// code, using an offset of -1<<31 instead of 1<<31.
+// (This is due to the assembler accepting offsets
+// like 0x80000000 and silently using them as
+// signed 32 bit offsets.)
+// f4 was ok, but testing it can't hurt.
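
Annotation: the arithmetic behind the f3 note, worked out: the index offset 1<<30 fits in int32, so the old rules folded it into the op, but scaling by the 2-byte int16 stride pushes the byte displacement to 1<<31, which wraps once stored in a 32-bit field:

package main

import "fmt"

func main() {
	const idx = 1 << 30            // f3's index offset; fits in int32, so it got folded
	off := int64(2 * idx)          // byte displacement after scaling by the int16 stride
	fmt.Println(off)               // 2147483648 (1<<31)
	fmt.Println(int64(int32(off))) // -2147483648: the offset f3 silently used
}
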