&& clobber(x1, x2, mem2)
=> (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
-(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
- (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
- (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
- (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
- (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-
-(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
- (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
- (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
- (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
- (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-
-(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
- (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
- (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
- (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
- (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-
-(MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {sym} ptr mem)
-(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem)
-(MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
-(MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
-(MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {sym} ptr val mem)
-(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem)
-(MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
-(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
-(MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
- (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
- (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
- (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
- (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVQload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
+(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVLload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
+
+(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVQstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
+(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVLstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
+(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
+
+(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVQstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
+(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVLstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
+(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVWstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
+(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVBstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
+
+(MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVQload [off1+off2] {sym} ptr mem)
+(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVLload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVWload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVBload [off1+off2] {sym} ptr mem)
+(MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOVQstore [off1+off2] {sym} ptr val mem)
+(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOVLstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
+ (MOVQstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
+ (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
+ (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
+ (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
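
The `is32Bit(int64(off1)+int64(off2))` guards in the rules above exist because both offsets are now typed as int32, and their sum can overflow if computed in 32 bits. A minimal sketch of the check, mirroring the helper's assumed definition in rewrite.go:

```go
package main

import "fmt"

// is32Bit reports whether n fits in a signed 32-bit integer, as the rule
// guards assume; the real helper lives in cmd/compile/internal/ssa/rewrite.go.
func is32Bit(n int64) bool { return n == int64(int32(n)) }

func main() {
	off1, off2 := int32(0x7fffffff), int32(1)
	// Adding in int32 would wrap to -2147483648; widening first preserves
	// the true sum, so the fold is correctly rejected.
	fmt.Println(is32Bit(int64(off1) + int64(off2))) // false
	fmt.Println(is32Bit(int64(off1) + int64(-1)))   // true
}
```
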
// Merge load and op
// TODO: add indexed variants?
-((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem)
-((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem)
-((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
-((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
-(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
-(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) ->
+((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem)
+((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
+(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off] {sym} ptr x mem)
-(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
-(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) ->
+(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off] {sym} ptr x mem)
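
These rules fuse a load feeding a single arithmetic use into one memory-operand instruction. A plausible source pattern that triggers the ADDQ case (a sketch; the exact assembly depends on compiler version and surrounding code):

```go
package main

import "fmt"

//go:noinline
func addMem(x int64, p *int64) int64 {
	// Without the rule this is MOVQload + ADDQ; with it, the load folds
	// into an ADDQload (ADDQ with a memory operand) since it has one use.
	return x + *p
}

func main() {
	v := int64(40)
	fmt.Println(addMem(2, &v)) // 42
}
```
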
// Merge ADDQconst and LEAQ into atomic loads.
-(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
(MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem)
-(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOV(Q|L|B)atomicload [off1+off2] {mergeSymTyped(sym1, sym2)} ptr mem)
// Merge ADDQconst and LEAQ into atomic stores.
-(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
(XCHGQ [off1+off2] {sym} val ptr mem)
-(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB ->
- (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
-(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
+ (XCHGQ [off1+off2] {mergeSymTyped(sym1,sym2)} val ptr mem)
+(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
(XCHGL [off1+off2] {sym} val ptr mem)
-(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB ->
- (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
+ (XCHGL [off1+off2] {mergeSymTyped(sym1,sym2)} val ptr mem)
// Merge ADDQconst into atomic adds.
// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
-(XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+(XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
(XADDQlock [off1+off2] {sym} val ptr mem)
-(XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+(XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
(XADDLlock [off1+off2] {sym} val ptr mem)
// Merge ADDQconst into atomic compare and swaps.
// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
-(CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(off1+off2) ->
+(CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
(CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
-(CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(off1+off2) ->
+(CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
(CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
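
A plausible Go-level pattern that exercises the XADDQlock rule is an atomic add on a field at a nonzero struct offset, where the address arrives as (ADDQconst [8] ptr) and the constant is folded into the locked instruction's displacement (a sketch; codegen details vary by version):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type stats struct {
	_ [8]byte // padding: the counter lives at offset 8
	n int64
}

func main() {
	var s stats
	// The address of s.n is base+8; the rule folds the 8 into XADDQlock's
	// offset field rather than materializing a separate ADDQconst.
	atomic.AddInt64(&s.n, 1)
	fmt.Println(atomic.LoadInt64(&s.n)) // 1
}
```
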
// We don't need the conditional move if we know the arg of BSF is not zero.
-(CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) && c != 0 -> x
+(CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) && c != 0 => x
// Extension is unnecessary for trailing zeros.
-(BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) -> (BSFQ (ORQconst <t> [1<<8] x))
-(BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) -> (BSFQ (ORQconst <t> [1<<16] x))
+(BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) => (BSFQ (ORQconst <t> [1<<8] x))
+(BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) => (BSFQ (ORQconst <t> [1<<16] x))
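
The ORed-in sentinel bit (1<<8 or 1<<16) both guarantees BSF a nonzero input and caps the result at the operand width, which is why zero-extending x cannot affect the answer. A quick demonstration of the same identity at the Go level:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	for _, x := range []uint8{0, 1, 0x40, 0xaa} {
		// Bit 8 acts as a sentinel: it bounds the count at 8 and makes the
		// input nonzero, so extending x beyond 8 bits changes nothing.
		got := bits.TrailingZeros64(uint64(x) | 1<<8)
		fmt.Println(x, got, got == bits.TrailingZeros8(x)) // always true
	}
}
```
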
// Redundant sign/zero extensions
// Note: see issue 21963. We have to make sure we use the right type on
// the resulting extension (the outer type, not the inner type).
-(MOVLQSX (MOVLQSX x)) -> (MOVLQSX x)
-(MOVLQSX (MOVWQSX x)) -> (MOVWQSX x)
-(MOVLQSX (MOVBQSX x)) -> (MOVBQSX x)
-(MOVWQSX (MOVWQSX x)) -> (MOVWQSX x)
-(MOVWQSX (MOVBQSX x)) -> (MOVBQSX x)
-(MOVBQSX (MOVBQSX x)) -> (MOVBQSX x)
-(MOVLQZX (MOVLQZX x)) -> (MOVLQZX x)
-(MOVLQZX (MOVWQZX x)) -> (MOVWQZX x)
-(MOVLQZX (MOVBQZX x)) -> (MOVBQZX x)
-(MOVWQZX (MOVWQZX x)) -> (MOVWQZX x)
-(MOVWQZX (MOVBQZX x)) -> (MOVBQZX x)
-(MOVBQZX (MOVBQZX x)) -> (MOVBQZX x)
+(MOVLQSX (MOVLQSX x)) => (MOVLQSX x)
+(MOVLQSX (MOVWQSX x)) => (MOVWQSX x)
+(MOVLQSX (MOVBQSX x)) => (MOVBQSX x)
+(MOVWQSX (MOVWQSX x)) => (MOVWQSX x)
+(MOVWQSX (MOVBQSX x)) => (MOVBQSX x)
+(MOVBQSX (MOVBQSX x)) => (MOVBQSX x)
+(MOVLQZX (MOVLQZX x)) => (MOVLQZX x)
+(MOVLQZX (MOVWQZX x)) => (MOVWQZX x)
+(MOVLQZX (MOVBQZX x)) => (MOVBQZX x)
+(MOVWQZX (MOVWQZX x)) => (MOVWQZX x)
+(MOVWQZX (MOVBQZX x)) => (MOVBQZX x)
+(MOVBQZX (MOVBQZX x)) => (MOVBQZX x)
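
The collapse is sound because extending an already-extended value again is a no-op; per the issue 21963 note, the replacement keeps the inner op so the outer use's type wins. In source terms (a sketch):

```go
package main

import "fmt"

func main() {
	x := int16(-5)
	// (MOVLQSX (MOVWQSX x)) => (MOVWQSX x): sign-extending 16->32->64 bits
	// equals sign-extending 16->64 directly.
	fmt.Println(int64(int32(x)) == int64(x)) // true
	y := uint8(0xF0)
	// (MOVLQZX (MOVBQZX y)) => (MOVBQZX y): same story for zero extension.
	fmt.Println(uint64(uint32(y)) == uint64(y)) // true
}
```
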
(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) ->
- ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) =>
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) ->
- ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) =>
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
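
These constmodify rules pack the constant and the offset into a single AuxInt via ValAndOff. A minimal runnable sketch of the assumed encoding (value in the high 32 bits, offset in the low 32, as in the ssa package) and of the canAdd32 guard used above:

```go
package main

import "fmt"

// ValAndOff packs a 32-bit value and a 32-bit offset into one int64 AuxInt;
// this is a sketch of the layout the rules above rely on.
type ValAndOff int64

func makeValAndOff32(val, off int32) ValAndOff {
	return ValAndOff(int64(val)<<32 | int64(uint32(off)))
}
func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
func (x ValAndOff) Off() int64 { return int64(int32(x)) }

// canAdd32 reports whether the offset can grow by off without overflowing
// its 32-bit field, mirroring the rule guard.
func (x ValAndOff) canAdd32(off int32) bool {
	newoff := x.Off() + int64(off)
	return newoff == int64(int32(newoff))
}

func main() {
	vo := makeValAndOff32(7, -4)
	fmt.Println(vo.Val(), vo.Off(), vo.canAdd32(100)) // 7 -4 true
}
```
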
// float <-> int register moves, with no conversion.
// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
-(MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) -> (MOVQf2i val)
-(MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) -> (MOVLf2i val)
-(MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) -> (MOVQi2f val)
-(MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) -> (MOVLi2f val)
+(MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) => (MOVQf2i val)
+(MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) => (MOVLf2i val)
+(MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) => (MOVQi2f val)
+(MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) => (MOVLi2f val)
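
As the comment says, these patterns arise from math.Float64bits and friends: the naive lowering stores to a stack slot from one register class and reloads into the other, and the rules replace that pair with a direct cross-register-file move. For instance:

```go
package main

import (
	"fmt"
	"math"
)

//go:noinline
func bitsOf(f float64) uint64 {
	// Naively: MOVSDstore to a slot, then MOVQload from it. The rule above
	// rewrites the store/load pair to a single MOVQf2i register move.
	return math.Float64bits(f)
}

func main() {
	fmt.Printf("%#x\n", bitsOf(1.0)) // 0x3ff0000000000000
}
```
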
// Other load-like ops.
-(ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ADDQ x (MOVQf2i y))
-(ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ADDL x (MOVLf2i y))
-(SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (SUBQ x (MOVQf2i y))
-(SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (SUBL x (MOVLf2i y))
-(ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ANDQ x (MOVQf2i y))
-(ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ANDL x (MOVLf2i y))
-( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> ( ORQ x (MOVQf2i y))
-( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> ( ORL x (MOVLf2i y))
-(XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (XORQ x (MOVQf2i y))
-(XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (XORL x (MOVLf2i y))
-
-(ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (ADDSD x (MOVQi2f y))
-(ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (ADDSS x (MOVLi2f y))
-(SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (SUBSD x (MOVQi2f y))
-(SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (SUBSS x (MOVLi2f y))
-(MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (MULSD x (MOVQi2f y))
-(MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (MULSS x (MOVLi2f y))
+(ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ADDQ x (MOVQf2i y))
+(ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ADDL x (MOVLf2i y))
+(SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (SUBQ x (MOVQf2i y))
+(SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (SUBL x (MOVLf2i y))
+(ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ANDQ x (MOVQf2i y))
+(ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ANDL x (MOVLf2i y))
+( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => ( ORQ x (MOVQf2i y))
+( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => ( ORL x (MOVLf2i y))
+(XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (XORQ x (MOVQf2i y))
+(XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (XORL x (MOVLf2i y))
+
+(ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (ADDSD x (MOVQi2f y))
+(ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (ADDSS x (MOVLi2f y))
+(SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (SUBSD x (MOVQi2f y))
+(SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (SUBSS x (MOVLi2f y))
+(MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (MULSD x (MOVQi2f y))
+(MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (MULSS x (MOVLi2f y))
// Redirect stores to use the other register set.
-(MOVQstore [off] {sym} ptr (MOVQf2i val) mem) -> (MOVSDstore [off] {sym} ptr val mem)
-(MOVLstore [off] {sym} ptr (MOVLf2i val) mem) -> (MOVSSstore [off] {sym} ptr val mem)
-(MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) -> (MOVQstore [off] {sym} ptr val mem)
-(MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) -> (MOVLstore [off] {sym} ptr val mem)
+(MOVQstore [off] {sym} ptr (MOVQf2i val) mem) => (MOVSDstore [off] {sym} ptr val mem)
+(MOVLstore [off] {sym} ptr (MOVLf2i val) mem) => (MOVSSstore [off] {sym} ptr val mem)
+(MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) => (MOVQstore [off] {sym} ptr val mem)
+(MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) => (MOVLstore [off] {sym} ptr val mem)
// Load args directly into the register class where it will be used.
// We do this by just modifying the type of the Arg.
-(MOVQf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() -> @b.Func.Entry (Arg <t> [off] {sym})
-(MOVLf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() -> @b.Func.Entry (Arg <t> [off] {sym})
-(MOVQi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() -> @b.Func.Entry (Arg <t> [off] {sym})
-(MOVLi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() -> @b.Func.Entry (Arg <t> [off] {sym})
+(MOVQf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+(MOVLf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+(MOVQi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+(MOVLi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
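
Retyping the Arg means the parameter is loaded straight into the register file where it is used, instead of landing in a GP register and bouncing through a move. A plausible function where this fires (a sketch):

```go
package main

import (
	"fmt"
	"math"
)

//go:noinline
func frombits(b uint64) float64 {
	// b is consumed only through MOVQi2f, so the Arg is retyped as a float
	// and loaded directly into an XMM register in the entry block.
	return math.Float64frombits(b)
}

func main() {
	fmt.Println(frombits(0x4000000000000000)) // 2
}
```
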
// LEAQ is rematerializeable, so this helps to avoid register spill.
// See issue 22947 for details
-(ADD(Q|L)const [off] x:(SP)) -> (LEA(Q|L) [off] x)
+(ADD(Q|L)const [off] x:(SP)) => (LEA(Q|L) [off] x)
// HMULx is commutative, but its first argument must go in AX.
// If possible, put a rematerializeable value in the first argument slot,
if l.Op != OpAMD64MOVLload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(OpAMD64ADDLload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
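
The generated matchers now round-trip AuxInt and Aux through typed helpers instead of reading and writing the raw fields. A minimal, self-contained sketch of their assumed shapes (the real definitions live in cmd/compile/internal/ssa/rewrite.go, and Sym here is a stand-in for that package's symbol interface):

```go
package main

import "fmt"

// Sym stands in for the ssa package's symbol interface (assumed shape).
type Sym interface{ String() string }

// Typed accessors: the migration replaces raw v.AuxInt / v.Aux access with
// checked conversions like these.
func auxIntToInt32(i int64) int32 { return int32(i) }
func int32ToAuxInt(i int32) int64 { return int64(i) }

func auxToSym(i interface{}) Sym {
	s, _ := i.(Sym)
	return s
}
func symToAux(s Sym) interface{} { return s }

func main() {
	fmt.Println(auxIntToInt32(int32ToAuxInt(-8))) // -8
	fmt.Println(auxToSym(nil) == nil)             // true: a missing Aux is just nil
}
```
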
// match: (ADDLconst [off] x:(SP))
// result: (LEAL [off] x)
for {
- off := v.AuxInt
+ off := auxIntToInt32(v.AuxInt)
x := v_0
if x.Op != OpSP {
break
}
v.reset(OpAMD64LEAL)
- v.AuxInt = off
+ v.AuxInt = int32ToAuxInt(off)
v.AddArg(x)
return true
}
// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// result: (ADDL x (MOVLf2i y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVQload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(OpAMD64ADDQload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (ADDQconst [off] x:(SP))
// result: (LEAQ [off] x)
for {
- off := v.AuxInt
+ off := auxIntToInt32(v.AuxInt)
x := v_0
if x.Op != OpSP {
break
}
v.reset(OpAMD64LEAQ)
- v.AuxInt = off
+ v.AuxInt = int32ToAuxInt(off)
v.AddArg(x)
return true
}
// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// result: (ADDQ x (MOVQf2i y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVSDload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(OpAMD64ADDSDload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// result: (ADDSD x (MOVQi2f y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVSSload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(OpAMD64ADDSSload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// result: (ADDSS x (MOVLi2f y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVLload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(OpAMD64ANDLload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// result: (ANDL x (MOVLf2i y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVQload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(OpAMD64ANDQload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// result: (ANDQ x (MOVQf2i y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
break
}
t := v_0.Type
- if v_0.AuxInt != 1<<8 {
+ if auxIntToInt32(v_0.AuxInt) != 1<<8 {
break
}
v_0_0 := v_0.Args[0]
x := v_0_0.Args[0]
v.reset(OpAMD64BSFQ)
v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
- v0.AuxInt = 1 << 8
+ v0.AuxInt = int32ToAuxInt(1 << 8)
v0.AddArg(x)
v.AddArg(v0)
return true
break
}
t := v_0.Type
- if v_0.AuxInt != 1<<16 {
+ if auxIntToInt32(v_0.AuxInt) != 1<<16 {
break
}
v_0_0 := v_0.Args[0]
x := v_0_0.Args[0]
v.reset(OpAMD64BSFQ)
v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
- v0.AuxInt = 1 << 16
+ v0.AuxInt = int32ToAuxInt(1 << 16)
v0.AddArg(x)
v.AddArg(v0)
return true
if v_2_0_0.Op != OpAMD64ORQconst {
break
}
- c := v_2_0_0.AuxInt
+ c := auxIntToInt32(v_2_0_0.AuxInt)
if !(c != 0) {
break
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDQconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
old := v_1
new_ := v_2
mem := v_3
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64CMPXCHGLlock)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg4(ptr, old, new_, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDQconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
old := v_1
new_ := v_2
mem := v_3
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64CMPXCHGQlock)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg4(ptr, old, new_, mem)
return true
}
if l.Op != OpAMD64MOVSDload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
break
}
v.reset(OpAMD64DIVSDload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
if l.Op != OpAMD64MOVSSload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
break
}
v.reset(OpAMD64DIVSSload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBatomicload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDQconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64MOVBatomicload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBatomicload [off1+off2] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBatomicload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return true
}
// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
- // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
break
}
v.reset(OpAMD64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
return true
}
// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
- // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
break
}
v.reset(OpAMD64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
// match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
return true
}
// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
- // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
- // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- sc := v.AuxInt
- sym1 := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAL {
break
}
- off := v_0.AuxInt
- sym2 := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
break
}
v.reset(OpAMD64MOVBstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
- // cond: ValAndOff(sc).canAdd(off)
- // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
for {
- sc := v.AuxInt
- s := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDLconst {
break
}
- off := v_0.AuxInt
+ off := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(ValAndOff(sc).canAdd(off)) {
+ if !(sc.canAdd32(off)) {
break
}
v.reset(OpAMD64MOVBstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = s
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
v.AddArg2(ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDQconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64MOVLatomicload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLatomicload [off1+off2] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLatomicload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
break
}
u := v_0.Type
- off := v_0.AuxInt
- sym := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
if !(t.Size() == u.Size()) {
break
}
b = b.Func.Entry
v0 := b.NewValue0(v.Pos, OpArg, t)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
return true
}
return false
break
}
u := v_0.Type
- off := v_0.AuxInt
- sym := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
if !(t.Size() == u.Size()) {
break
}
b = b.Func.Entry
v0 := b.NewValue0(v.Pos, OpArg, t)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
return true
}
return false
return true
}
// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
- // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
break
}
v.reset(OpAMD64MOVLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVLload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64MOVLload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
// result: (MOVLf2i val)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpAMD64MOVSSstore || v_1.AuxInt != off || v_1.Aux != sym {
+ if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
val := v_1.Args[1]
return true
}
// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
- // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVLstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// cond: y.Uses==1 && clobber(y)
// result: (ADDLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
- if y.Op != OpAMD64ADDLload || y.AuxInt != off || y.Aux != sym {
+ if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
break
}
mem := y.Args[2]
break
}
v.reset(OpAMD64ADDLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && clobber(y)
// result: (ANDLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
- if y.Op != OpAMD64ANDLload || y.AuxInt != off || y.Aux != sym {
+ if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
break
}
mem := y.Args[2]
break
}
v.reset(OpAMD64ANDLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && clobber(y)
// result: (ORLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
- if y.Op != OpAMD64ORLload || y.AuxInt != off || y.Aux != sym {
+ if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
break
}
mem := y.Args[2]
break
}
v.reset(OpAMD64ORLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && clobber(y)
// result: (XORLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
- if y.Op != OpAMD64XORLload || y.AuxInt != off || y.Aux != sym {
+ if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
break
}
mem := y.Args[2]
break
}
v.reset(OpAMD64XORLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (ADDLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64ADDL {
y_1 := y.Args[1]
for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
l := y_0
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
continue
}
mem := l.Args[1]
continue
}
v.reset(OpAMD64ADDLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (SUBLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64SUBL {
}
x := y.Args[1]
l := y.Args[0]
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
break
}
v.reset(OpAMD64SUBLmodify)
-	v.AuxInt = off
-	v.Aux = sym
+	v.AuxInt = int32ToAuxInt(off)
+	v.Aux = symToAux(sym)
	v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (ANDLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64ANDL {
y_1 := y.Args[1]
for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
l := y_0
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
continue
}
mem := l.Args[1]
continue
}
v.reset(OpAMD64ANDLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (ORLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64ORL {
y_1 := y.Args[1]
for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
l := y_0
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
continue
}
mem := l.Args[1]
continue
}
v.reset(OpAMD64ORLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (XORLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64XORL {
y_1 := y.Args[1]
for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
l := y_0
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
continue
}
mem := l.Args[1]
continue
}
v.reset(OpAMD64XORLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (BTCLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64BTCL {
}
x := y.Args[1]
l := y.Args[0]
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
break
}
v.reset(OpAMD64BTCLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (BTRLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64BTRL {
}
x := y.Args[1]
l := y.Args[0]
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
break
}
v.reset(OpAMD64BTRLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (BTSLmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64BTSL {
}
x := y.Args[1]
l := y.Args[0]
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
break
}
v.reset(OpAMD64BTSLmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (ADDLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64ADDLconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt32(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64ADDLconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (ANDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (ANDLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64ANDLconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt32(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64ANDLconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (ORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (ORLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64ORLconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt32(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64ORLconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (XORLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64XORLconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt32(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64XORLconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (BTCLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (BTCLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64BTCLconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt8(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64BTCLconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (BTRLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (BTRLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64BTRLconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt8(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64BTRLconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (BTSLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (BTSLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64BTSLconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt8(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64BTSLconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
// result: (MOVSSstore [off] {sym} ptr val mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVLf2i {
break
}
val := v_1.Args[0]
mem := v_2
v.reset(OpAMD64MOVSSstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
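// The MOVLf2i rule above removes a pointless GP<->XMM round trip: storing
// the bits of a float32 through an integer store is the same as storing the
// float directly. In Go terms (illustrative only):
//
//	u := math.Float32bits(f) // MOVLf2i
//	*(*uint32)(p) = u        // MOVLstore
//	// is equivalent to
//	*(*float32)(p) = f       // MOVSSstore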
return true
}
// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
- // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
- // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVLstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- sc := v.AuxInt
- sym1 := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAL {
break
}
- off := v_0.AuxInt
- sym2 := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
break
}
v.reset(OpAMD64MOVLstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
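// canAdd32/addOffset32 are the ValAndOff analogues of the is32Bit guard on
// plain offsets: only the offset half changes, and the sum must still fit
// in 32 bits. A sketch, assuming the packing shown earlier:
//
//	func (x ValAndOff) canAdd32(off int32) bool {
//		newoff := x.Off() + int64(off)
//		return newoff == int64(int32(newoff))
//	}
//	func (x ValAndOff) addOffset32(off int32) ValAndOff {
//		return makeValAndOff32(int32(x.Val()), int32(x.Off())+off)
//	}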
// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
- // cond: ValAndOff(sc).canAdd(off)
- // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem)
for {
- sc := v.AuxInt
- s := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDLconst {
break
}
- off := v_0.AuxInt
+ off := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(ValAndOff(sc).canAdd(off)) {
+ if !(sc.canAdd32(off)) {
break
}
v.reset(OpAMD64MOVLstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = s
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
v.AddArg2(ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDQconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64MOVQatomicload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
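// The int64 widening in the condition is the point of this change: with
// typed int32 offsets, off1+off2 could wrap before the range check, so the
// sum is formed in int64 first. A sketch of the helper, assuming the usual
// rewrite.go definition:
//
//	func is32Bit(n int64) bool { return n == int64(int32(n)) }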
// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVQatomicload [off1+off2] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVQatomicload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
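// Merging symbols is only legal when at most one side actually has one; the
// typed variant returns a Sym rather than interface{}. A sketch, assuming
// the helpers this migration introduces in rewrite.go:
//
//	func canMergeSym(x, y interface{}) bool { return x == nil || y == nil }
//
//	func mergeSymTyped(x, y Sym) Sym {
//		if x == nil {
//			return y
//		}
//		if y == nil {
//			return x
//		}
//		panic(fmt.Sprintf("mergeSym with two non-nil syms %v %v", x, y))
//	}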
break
}
u := v_0.Type
- off := v_0.AuxInt
- sym := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
if !(t.Size() == u.Size()) {
break
}
b = b.Func.Entry
v0 := b.NewValue0(v.Pos, OpArg, t)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
return true
}
return false
break
}
u := v_0.Type
- off := v_0.AuxInt
- sym := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
if !(t.Size() == u.Size()) {
break
}
b = b.Func.Entry
v0 := b.NewValue0(v.Pos, OpArg, t)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
return true
}
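// The two blocks above implement rules of the shape (judging by the
// surrounding ops, the MOVQf2i/MOVQi2f pair):
//
//	(MOVQf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() ->
//		@b.Func.Entry (Arg <t> [off] {sym})
//
// When the operand is a same-size function argument, the bit
// reinterpretation is free: v.copyOf(v0) redirects v to a fresh Arg of the
// desired type materialized in the entry block.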
return false
return true
}
// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
- // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
break
}
v.reset(OpAMD64MOVQload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVQload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64MOVQload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
// result: (MOVQf2i val)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpAMD64MOVSDstore || v_1.AuxInt != off || v_1.Aux != sym {
+ if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
val := v_1.Args[1]
return true
}
// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
- // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
// match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVQstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
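// The rules that follow fuse load/op/store triples on the same address into
// a single read-modify-write instruction. Illustrative assembly
// (hand-written, not compiler output):
//
//	MOVQ (AX), BX  // l: MOVQload
//	ADDQ CX, BX    // y: ADDQ
//	MOVQ BX, (AX)  // MOVQstore
//
// becomes
//
//	ADDQ CX, (AX)  // ADDQmodify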
// cond: y.Uses==1 && clobber(y)
// result: (ADDQmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
- if y.Op != OpAMD64ADDQload || y.AuxInt != off || y.Aux != sym {
+ if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
break
}
mem := y.Args[2]
x := y.Args[0]
if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
break
}
v.reset(OpAMD64ADDQmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && clobber(y)
// result: (ANDQmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
- if y.Op != OpAMD64ANDQload || y.AuxInt != off || y.Aux != sym {
+ if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
break
}
mem := y.Args[2]
x := y.Args[0]
if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
break
}
v.reset(OpAMD64ANDQmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && clobber(y)
// result: (ORQmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
- if y.Op != OpAMD64ORQload || y.AuxInt != off || y.Aux != sym {
+ if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
break
}
mem := y.Args[2]
x := y.Args[0]
if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
break
}
v.reset(OpAMD64ORQmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && clobber(y)
// result: (XORQmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
- if y.Op != OpAMD64XORQload || y.AuxInt != off || y.Aux != sym {
+ if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
break
}
mem := y.Args[2]
x := y.Args[0]
if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
break
}
v.reset(OpAMD64XORQmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
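// The _i0 loops below are rulegen's treatment of commutative ops: the match
// body runs twice, swapping y_0 and y_1 on the second pass, so both
// (ADDQ l x) and (ADDQ x l) are tried. The generated idiom:
//
//	for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
//		// attempt the match with y_0 as the load, y_1 as the other operand
//	}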
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (ADDQmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64ADDQ {
break
}
y_0 := y.Args[0]
y_1 := y.Args[1]
for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
l := y_0
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
continue
}
mem := l.Args[1]
x := y_1
if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
continue
}
v.reset(OpAMD64ADDQmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (SUBQmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64SUBQ {
break
}
x := y.Args[1]
l := y.Args[0]
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
break
}
v.reset(OpAMD64SUBQmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (ANDQmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64ANDQ {
break
}
y_0 := y.Args[0]
y_1 := y.Args[1]
for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
l := y_0
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
continue
}
mem := l.Args[1]
x := y_1
if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
continue
}
v.reset(OpAMD64ANDQmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (ORQmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64ORQ {
break
}
y_0 := y.Args[0]
y_1 := y.Args[1]
for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
l := y_0
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
continue
}
mem := l.Args[1]
x := y_1
if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
continue
}
v.reset(OpAMD64ORQmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (XORQmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64XORQ {
break
}
y_0 := y.Args[0]
y_1 := y.Args[1]
for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
l := y_0
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
continue
}
mem := l.Args[1]
x := y_1
if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
continue
}
v.reset(OpAMD64XORQmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (BTCQmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64BTCQ {
break
}
x := y.Args[1]
l := y.Args[0]
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
break
}
v.reset(OpAMD64BTCQmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (BTRQmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64BTRQ {
break
}
x := y.Args[1]
l := y.Args[0]
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
break
}
v.reset(OpAMD64BTRQmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
// result: (BTSQmodify [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64BTSQ {
break
}
x := y.Args[1]
l := y.Args[0]
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
break
}
v.reset(OpAMD64BTSQmodify)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
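// validValAndOff is the pre-pack check used in the conditions above: both
// halves must fit in 32 bits before makeValAndOff32 may combine them. A
// sketch, assuming the usual rewrite.go definition:
//
//	func validValAndOff(val, off int64) bool {
//		return is32Bit(val) && is32Bit(off)
//	}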
// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (ADDQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64ADDQconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt32(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64ADDQconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (ANDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (ANDQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64ANDQconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt32(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64ANDQconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (ORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (ORQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64ORQconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt32(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64ORQconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (XORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (XORQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64XORQconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt32(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64XORQconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (BTCQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (BTCQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64BTCQconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt8(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64BTCQconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (BTRQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (BTRQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64BTRQconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt8(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64BTRQconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
- // result: (BTSQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (BTSQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
a := v_1
if a.Op != OpAMD64BTSQconst {
break
}
- c := a.AuxInt
+ c := auxIntToInt8(a.AuxInt)
l := a.Args[0]
- if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym {
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
break
}
mem := l.Args[1]
ptr2 := l.Args[0]
- if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
break
}
v.reset(OpAMD64BTSQconstmodify)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
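// The BT*Qconstmodify ops turn a load/bit-twiddle/store sequence into one
// memory-operand instruction, e.g. (illustrative):
//
//	BTSQ $3, (AX)  // *(*uint64)(AX) |= 1 << 3
//	BTRQ $3, (AX)  // *(*uint64)(AX) &^= 1 << 3
//	BTCQ $3, (AX)  // *(*uint64)(AX) ^= 1 << 3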
// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
// result: (MOVSDstore [off] {sym} ptr val mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVQf2i {
break
}
val := v_1.Args[0]
mem := v_2
v.reset(OpAMD64MOVSDstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
return true
}
// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
- // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
- // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVQstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- sc := v.AuxInt
- sym1 := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAL {
break
}
- off := v_0.AuxInt
- sym2 := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
break
}
v.reset(OpAMD64MOVQstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
- // cond: ValAndOff(sc).canAdd(off)
- // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVQstoreconst [sc.addOffset32(off)] {s} ptr mem)
for {
- sc := v.AuxInt
- s := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDLconst {
break
}
- off := v_0.AuxInt
+ off := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(ValAndOff(sc).canAdd(off)) {
+ if !(sc.canAdd32(off)) {
break
}
v.reset(OpAMD64MOVQstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = s
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
v.AddArg2(ptr, mem)
return true
}
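// The MOVSDload rule below is store-to-load forwarding across register
// files: a float64 load that reads back a just-stored integer takes the
// value straight from the integer register. In Go terms (illustrative
// only):
//
//	*(*uint64)(p) = u
//	f := *(*float64)(p) // can become: f := math.Float64frombits(u)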
// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
// result: (MOVQi2f val)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpAMD64MOVQstore || v_1.AuxInt != off || v_1.Aux != sym {
+ if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
val := v_1.Args[1]
// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
// result: (MOVQstore [off] {sym} ptr val mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVQi2f {
break
}
val := v_1.Args[0]
mem := v_2
v.reset(OpAMD64MOVQstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
// result: (MOVLi2f val)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpAMD64MOVLstore || v_1.AuxInt != off || v_1.Aux != sym {
+ if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
val := v_1.Args[1]
// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
// result: (MOVLstore [off] {sym} ptr val mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVLi2f {
break
}
val := v_1.Args[0]
mem := v_2
v.reset(OpAMD64MOVLstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
return true
}
// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
- // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
break
}
v.reset(OpAMD64MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
// match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVWload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
return true
}
// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
- // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAL {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
// match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVWstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDLconst {
break
}
- off2 := v_0.AuxInt
+ off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
return true
}
// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
- // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
- // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- sc := v.AuxInt
- sym1 := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAL {
break
}
- off := v_0.AuxInt
- sym2 := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
break
}
v.reset(OpAMD64MOVWstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
- // cond: ValAndOff(sc).canAdd(off)
- // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
for {
- sc := v.AuxInt
- s := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDLconst {
break
}
- off := v_0.AuxInt
+ off := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(ValAndOff(sc).canAdd(off)) {
+ if !(sc.canAdd32(off)) {
break
}
v.reset(OpAMD64MOVWstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = s
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
v.AddArg2(ptr, mem)
return true
}
if l.Op != OpAMD64MOVSDload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(OpAMD64MULSDload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
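// canMergeLoadClobber extends the plain canMergeLoad check for folds that
// overwrite the register holding x. A sketch, assuming the rewrite.go
// helper of this vintage:
//
//	func canMergeLoadClobber(target, load, x *Value) bool {
//		// The register containing x is about to be clobbered, so
//		// don't merge if x is still needed elsewhere, or if folding
//		// would force a reload at a deeper loop level.
//		if x.Uses != 1 {
//			return false
//		}
//		loopnest := x.Block.Func.loopnest()
//		loopnest.calculateDepths()
//		if loopnest.depth(target.Block.ID) > loopnest.depth(x.Block.ID) {
//			return false
//		}
//		return canMergeLoad(target, load)
//	}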
// match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// result: (MULSD x (MOVQi2f y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVSSload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(OpAMD64MULSSload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// result: (MULSS x (MOVLi2f y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVLload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(OpAMD64ORLload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// result: ( ORL x (MOVLf2i y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVQload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(OpAMD64ORQload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// result: ( ORQ x (MOVQf2i y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVLload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
break
}
v.reset(OpAMD64SUBLload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// result: (SUBL x (MOVLf2i y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVQload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
break
}
v.reset(OpAMD64SUBQload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// result: (SUBQ x (MOVQf2i y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVSDload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
break
}
v.reset(OpAMD64SUBSDload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// result: (SUBSD x (MOVQi2f y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVSSload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
break
}
v.reset(OpAMD64SUBSSload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// result: (SUBSS x (MOVLi2f y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (XADDLlock [off1+off2] {sym} val ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64ADDQconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
ptr := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64XADDLlock)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, ptr, mem)
return true
}
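// Folding the constant into a locked op only changes the addressing mode;
// atomicity comes from the LOCK prefix on the instruction itself, so the
// rewrite is safe. Illustrative assembly, assuming the ADDQconst result has
// no other uses:
//
//	ADDQ $8, BX
//	LOCK; XADDL AX, (BX)
//
// becomes
//
//	LOCK; XADDL AX, 8(BX)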
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (XADDQlock [off1+off2] {sym} val ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64ADDQconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
ptr := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64XADDQlock)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (XCHGL [off1+off2] {sym} val ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64ADDQconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
ptr := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64XCHGL)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, ptr, mem)
return true
}
// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
- // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
+ // result: (XCHGL [off1+off2] {mergeSymTyped(sym1,sym2)} val ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
ptr := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
break
}
v.reset(OpAMD64XCHGL)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: is32Bit(int64(off1)+int64(off2))
// result: (XCHGQ [off1+off2] {sym} val ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64ADDQconst {
break
}
- off2 := v_1.AuxInt
+ off2 := auxIntToInt32(v_1.AuxInt)
ptr := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64XCHGQ)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(val, ptr, mem)
return true
}
// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
- // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
+ // result: (XCHGQ [off1+off2] {mergeSymTyped(sym1,sym2)} val ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
ptr := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
break
}
v.reset(OpAMD64XCHGQ)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, ptr, mem)
return true
}
if l.Op != OpAMD64MOVLload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(OpAMD64XORLload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// result: (XORL x (MOVLf2i y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]
if l.Op != OpAMD64MOVQload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(OpAMD64XORQload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(x, ptr, mem)
return true
}
// match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// result: (XORQ x (MOVQf2i y))
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
ptr := v_1
- if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
break
}
y := v_2.Args[1]