(Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base)
(Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base)
-(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 -> (SETLmem [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 -> (SETLEmem [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 -> (SETGmem [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 -> (SETGEmem [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 -> (SETEQmem [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 -> (SETNEmem [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 -> (SETBmem [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 -> (SETBEmem [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 -> (SETAmem [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 -> (SETAEmem [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 -> (SETLstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 -> (SETLEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 -> (SETGstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 -> (SETGEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 -> (SETEQstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 -> (SETNEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 -> (SETBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 -> (SETBEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 -> (SETAstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 -> (SETAEstore [off] {sym} ptr x mem)
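// Illustrative sketch (hypothetical source, not taken from this patch):
// assuming amd64, a function like
//
//	func storeLess(p *bool, a, b int64) {
//		*p = a < b
//	}
//
// produces (MOVBstore ptr (SETL (CMPQ a b)) mem) in SSA form; the rules above
// fuse that into a single SETLstore, i.e. SETLT writing straight to *p,
// provided the SETL result has no other uses (y.Uses == 1).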
// block rewrites
(If (SETL cmp) yes no) -> (LT cmp yes no)
(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c) && !config.nacl
    -> (SET(B|AE) (BTQconst [log2(c)] x))
(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) && !config.nacl
-> (SET(B|AE) (BTQconst [log2(c)] x))
-// SET..mem variant
-(SET(NE|EQ)mem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl
- -> (SET(B|AE)mem [off] {sym} ptr (BTL x y) mem)
-(SET(NE|EQ)mem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl
- -> (SET(B|AE)mem [off] {sym} ptr (BTQ x y) mem)
-(SET(NE|EQ)mem [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(c) && !config.nacl
- -> (SET(B|AE)mem [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
-(SET(NE|EQ)mem [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(c) && !config.nacl
- -> (SET(B|AE)mem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
-(SET(NE|EQ)mem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c) && !config.nacl
- -> (SET(B|AE)mem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+// SET..store variant
+(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl
+ -> (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl
+ -> (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(c) && !config.nacl
+ -> (SET(B|AE)store [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(c) && !config.nacl
+ -> (SET(B|AE)store [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c) && !config.nacl
+ -> (SET(B|AE)store [off] {sym} ptr (BTQconst [log2(c)] x) mem)
// Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules
// and further combining shifts.
// Rewrite a & 1 != 1 into a & 1 == 0.
// Among other things, this lets us turn (a>>b)&1 != 1 into a bit test.
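// Illustrative sketch (hypothetical source): code like
//
//	func bit(a uint64, b uint) bool {
//		return (a>>b)&1 != 0
//	}
//
// is the kind of input these rules target; after shift combining it matches
// the TEST patterns above and compiles to BTQ plus SETB on amd64 (assuming
// !config.nacl), and the "!= 1" spelling is first normalized to "== 0" so it
// can take the same path.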
(SET(NE|EQ) (CMPLconst [1] s:(ANDLconst [1] _))) -> (SET(EQ|NE) (CMPLconst [0] s))
-(SET(NE|EQ)mem [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) -> (SET(EQ|NE)mem [off] {sym} ptr (CMPLconst [0] s) mem)
+(SET(NE|EQ)store [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) -> (SET(EQ|NE)store [off] {sym} ptr (CMPLconst [0] s) mem)
(SET(NE|EQ) (CMPQconst [1] s:(ANDQconst [1] _))) -> (SET(EQ|NE) (CMPQconst [0] s))
-(SET(NE|EQ)mem [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) -> (SET(EQ|NE)mem [off] {sym} ptr (CMPQconst [0] s) mem)
+(SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) -> (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem)
// Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) && !config.nacl -> (BTS(Q|L) x y)
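// For example (a sketch, assuming !config.nacl): a |= 1 << b computes
// (ORQ (SHLQ (MOVQconst [1]) b) a), which the rule above collapses to a
// single (BTSQ a b); the analogous XOR form for toggling becomes BTC.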
((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2 && !config.nacl
    -> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2 && !config.nacl
-> ((SETB|SETAE|ULT|UGE) (BTQconst [31] x))
-(SET(NE|EQ)mem [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2 && !config.nacl
- -> (SET(B|AE)mem [off] {sym} ptr (BTQconst [63] x) mem)
-(SET(NE|EQ)mem [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2 && !config.nacl
- -> (SET(B|AE)mem [off] {sym} ptr (BTLconst [31] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2 && !config.nacl
+ -> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2 && !config.nacl
+ -> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2 && !config.nacl
-> ((SETB|SETAE|ULT|UGE) (BTQconst [0] x))
((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2 && !config.nacl
-> ((SETB|SETAE|ULT|UGE) (BTLconst [0] x))
-(SET(NE|EQ)mem [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2 && !config.nacl
- -> (SET(B|AE)mem [off] {sym} ptr (BTQconst [0] x) mem)
-(SET(NE|EQ)mem [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2 && !config.nacl
- -> (SET(B|AE)mem [off] {sym} ptr (BTLconst [0] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2 && !config.nacl
+ -> (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2 && !config.nacl
+ -> (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem)
// Special-case manually testing last bit with "a>>63 != 0" (without "&1")
((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2 && !config.nacl
-> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2 && !config.nacl
-> ((SETB|SETAE|ULT|UGE) (BTLconst [31] x))
-(SET(NE|EQ)mem [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2 && !config.nacl
- -> (SET(B|AE)mem [off] {sym} ptr (BTQconst [63] x) mem)
-(SET(NE|EQ)mem [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2 && !config.nacl
- -> (SET(B|AE)mem [off] {sym} ptr (BTLconst [31] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2 && !config.nacl
+ -> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2 && !config.nacl
+ -> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
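// Illustrative sketch (hypothetical source): a manual sign test such as
//
//	func negative(x int64) bool {
//		return uint64(x)>>63 != 0
//	}
//
// carries no explicit "&1", but since bit 63 is the topmost bit the shift
// alone isolates it, so the rules above still reduce the TESTQ of the shift
// result to (BTQconst [63] x) with SETB/SETAE (or ULT/UGE for branches).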
// Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1)
(BTS(Q|L)const [c] (BTR(Q|L)const [c] x)) -> (BTS(Q|L)const [c] x)
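// For instance, math.Copysign(c, -1) first clears and then sets the sign bit,
// i.e. (BTSQconst [63] (BTRQconst [63] x)); clearing a bit that is set right
// afterwards is dead work, so only the BTS survives.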
(SETEQ (InvertFlags x)) -> (SETEQ x)
(SETNE (InvertFlags x)) -> (SETNE x)
-(SETLmem [off] {sym} ptr (InvertFlags x) mem) -> (SETGmem [off] {sym} ptr x mem)
-(SETGmem [off] {sym} ptr (InvertFlags x) mem) -> (SETLmem [off] {sym} ptr x mem)
-(SETBmem [off] {sym} ptr (InvertFlags x) mem) -> (SETAmem [off] {sym} ptr x mem)
-(SETAmem [off] {sym} ptr (InvertFlags x) mem) -> (SETBmem [off] {sym} ptr x mem)
-(SETLEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETGEmem [off] {sym} ptr x mem)
-(SETGEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETLEmem [off] {sym} ptr x mem)
-(SETBEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETAEmem [off] {sym} ptr x mem)
-(SETAEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETBEmem [off] {sym} ptr x mem)
-(SETEQmem [off] {sym} ptr (InvertFlags x) mem) -> (SETEQmem [off] {sym} ptr x mem)
-(SETNEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETNEmem [off] {sym} ptr x mem)
+(SETLstore [off] {sym} ptr (InvertFlags x) mem) -> (SETGstore [off] {sym} ptr x mem)
+(SETGstore [off] {sym} ptr (InvertFlags x) mem) -> (SETLstore [off] {sym} ptr x mem)
+(SETBstore [off] {sym} ptr (InvertFlags x) mem) -> (SETAstore [off] {sym} ptr x mem)
+(SETAstore [off] {sym} ptr (InvertFlags x) mem) -> (SETBstore [off] {sym} ptr x mem)
+(SETLEstore [off] {sym} ptr (InvertFlags x) mem) -> (SETGEstore [off] {sym} ptr x mem)
+(SETGEstore [off] {sym} ptr (InvertFlags x) mem) -> (SETLEstore [off] {sym} ptr x mem)
+(SETBEstore [off] {sym} ptr (InvertFlags x) mem) -> (SETAEstore [off] {sym} ptr x mem)
+(SETAEstore [off] {sym} ptr (InvertFlags x) mem) -> (SETBEstore [off] {sym} ptr x mem)
+(SETEQstore [off] {sym} ptr (InvertFlags x) mem) -> (SETEQstore [off] {sym} ptr x mem)
+(SETNEstore [off] {sym} ptr (InvertFlags x) mem) -> (SETNEstore [off] {sym} ptr x mem)
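// InvertFlags marks a comparison whose operands were swapped, for instance by
// the load-folding rules further down, which produce
// (InvertFlags (CMP(Q|L|W|B)load ...)) when the load is the first operand.
// Swapping operands flips the orderings (< becomes >, unsigned below becomes
// above) while == and != are unaffected, which is exactly what these
// SET*store rules encode.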
// sign extended loads
// Note: The combined instruction must end up in the same block
(MOV(Q|L|W|B|SS|SD|O)load [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
    (MOV(Q|L|W|B|SS|SD|O)load [off1+off2] {sym} ptr mem)
(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) ->
(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {sym} ptr val mem)
-(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
- (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1+off2] {sym} base val mem)
-((ADD|SUB|AND|OR|XOR)Qmem [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
- ((ADD|SUB|AND|OR|XOR)Qmem [off1+off2] {sym} val base mem)
-((ADD|SUB|AND|OR|XOR)Lmem [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
- ((ADD|SUB|AND|OR|XOR)Lmem [off1+off2] {sym} val base mem)
-((ADD|SUB|MUL)SSmem [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
- ((ADD|SUB|MUL)SSmem [off1+off2] {sym} val base mem)
-((ADD|SUB|MUL)SDmem [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
- ((ADD|SUB|MUL)SDmem [off1+off2] {sym} val base mem)
-(ADD(L|Q)constmem [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
- (ADD(L|Q)constmem [ValAndOff(valoff1).add(off2)] {sym} base mem)
+(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
+ (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {sym} base val mem)
+((ADD|SUB|AND|OR|XOR)Qload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
+ ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem)
+((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
+ ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
+ ((ADD|SUB|MUL)SSload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
+ ((ADD|SUB|MUL)SDload [off1+off2] {sym} val base mem)
+(ADD(L|Q)constmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
+ (ADD(L|Q)constmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
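// Worked example of the folding above: (ADDQload [8] {s} val
// (ADDQconst [16] base) mem) becomes (ADDQload [24] {s} val base mem), so the
// address arithmetic is absorbed into the instruction's displacement; the
// is32Bit guard keeps the combined offset within a signed 32-bit field.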
// Fold constants into stores.
(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
    (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
    (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOV(Q|L|W|B)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
(MOV(Q|L|W|B)storeconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-((ADD|SUB|AND|OR|XOR)Qmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- ((ADD|SUB|AND|OR|XOR)Qmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
-((ADD|SUB|AND|OR|XOR)Lmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- ((ADD|SUB|AND|OR|XOR)Lmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
-((ADD|SUB|MUL)SSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- ((ADD|SUB|MUL)SSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
-((ADD|SUB|MUL)SDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ ((ADD|SUB|MUL)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- ((ADD|SUB|MUL)SDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
-(ADD(L|Q)constmem [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ ((ADD|SUB|MUL)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+(ADD(L|Q)constmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
- (ADD(L|Q)constmem [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ (ADD(L|Q)constmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
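// Worked example of the LEAQ folding above (hypothetical offsets):
// (ADDQload [8] {sym1} val (LEAQ [16] {sym2} base) mem) merges into
// (ADDQload [24] {mergeSym(sym1,sym2)} val base mem), provided at most one of
// the two symbols is non-nil (canMergeSym) and the summed offset stays 32-bit.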
// generating indexed loads and stores
(MOV(B|W|L|Q|SS|SD)load [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
    (MOV(B|W|L|Q|SS|SD)loadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
((SETNE|SETG|SETGE|SETA|SETAE) (FlagGT_UGT)) -> (MOVLconst [1])
((SETEQ|SETL|SETLE|SETB|SETBE) (FlagGT_UGT)) -> (MOVLconst [0])
-(SETEQmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETEQmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETEQmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETEQmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETEQmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-
-(SETNEmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETNEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETNEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETNEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETNEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-
-(SETLmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETLmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETLmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETLmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETLmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-
-(SETLEmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETLEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETLEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETLEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETLEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-
-(SETGmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETGmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETGmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETGmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETGmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-
-(SETGEmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETGEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETGEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETGEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETGEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-
-(SETBmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETBmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETBmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETBmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETBmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-
-(SETBEmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETBEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETBEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETBEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETBEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-
-(SETAmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETAmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETAmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETAmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETAmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-
-(SETAEmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETAEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETAEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
-(SETAEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
-(SETAEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETEQstore [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETEQstore [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETEQstore [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETEQstore [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETEQstore [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+
+(SETNEstore [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETNEstore [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETNEstore [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETNEstore [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETNEstore [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+
+(SETLstore [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETLstore [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETLstore [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETLstore [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETLstore [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+
+(SETLEstore [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETLEstore [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETLEstore [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETLEstore [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETLEstore [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+
+(SETGstore [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETGstore [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETGstore [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETGstore [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETGstore [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+
+(SETGEstore [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETGEstore [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETGEstore [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETGEstore [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETGEstore [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+
+(SETBstore [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETBstore [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETBstore [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETBstore [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETBstore [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+
+(SETBEstore [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETBEstore [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETBEstore [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETBEstore [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETBEstore [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+
+(SETAstore [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETAstore [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETAstore [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETAstore [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETAstore [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+
+(SETAEstore [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETAEstore [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETAEstore [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+(SETAEstore [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+(SETAEstore [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
// Remove redundant *const ops
(ADDQconst [0] x) -> x
// Merge load and op
// TODO: add indexed variants?
-((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Qmem x [off] {sym} ptr mem)
-((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Lmem x [off] {sym} ptr mem)
-((ADD|SUB|MUL)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|MUL)SDmem x [off] {sym} ptr mem)
-((ADD|SUB|MUL)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|MUL)SSmem x [off] {sym} ptr mem)
+((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem)
+((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem)
+((ADD|SUB|MUL)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|MUL)SDload x [off] {sym} ptr mem)
+((ADD|SUB|MUL)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|MUL)SSload x [off] {sym} ptr mem)
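// Illustrative sketch (hypothetical source): in
//
//	func add(x int64, p *int64) int64 {
//		return x + *p
//	}
//
// the MOVQload feeding the ADDQ is folded into a single (ADDQload x ptr mem),
// i.e. an ADDQ with a memory operand, whenever canMergeLoad holds (roughly:
// the load has no other uses and no store intervenes between load and op).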
// Merge ADDQconst and LEAQ into atomic loads.
(MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
    (MOVQatomicload [off1+off2] {sym} ptr mem)
(MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
- (ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
(MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
- (ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
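// Illustrative sketch (hypothetical source): a read-modify-write such as
//
//	func bump(p *int64) {
//		*p += 5
//	}
//
// matches the load/add/store triple above and collapses to a single
// (ADDQconstmodify [makeValAndOff(5,0)] ptr mem), an add directly on memory.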
// float <-> int register moves, with no conversion.
// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
(MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) -> (MOVLi2f val)
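// Illustrative sketch: without this rule, math.Float32frombits(x) would store
// x with MOVLstore and immediately load the same slot back with MOVSSload;
// matching the store/load pair rewrites it to (MOVLi2f x), a direct
// GP-to-XMM register move with no memory round trip.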
// Other load-like ops.
-(ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ADDQ x (MOVQf2i y))
-(ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ADDL x (MOVLf2i y))
-(SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (SUBQ x (MOVQf2i y))
-(SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (SUBL x (MOVLf2i y))
-(ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ANDQ x (MOVQf2i y))
-(ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ANDL x (MOVLf2i y))
-( ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> ( ORQ x (MOVQf2i y))
-( ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> ( ORL x (MOVLf2i y))
-(XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (XORQ x (MOVQf2i y))
-(XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (XORL x (MOVLf2i y))
-
-(ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _)) ->
+(ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ADDQ x (MOVQf2i y))
+(ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ADDL x (MOVLf2i y))
+(SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (SUBQ x (MOVQf2i y))
+(SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (SUBL x (MOVLf2i y))
+(ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ANDQ x (MOVQf2i y))
+(ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ANDL x (MOVLf2i y))
+( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> ( ORQ x (MOVQf2i y))
+( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> ( ORL x (MOVLf2i y))
+(XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (XORQ x (MOVQf2i y))
+(XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (XORL x (MOVLf2i y))
+
+(ADDQconstmodify [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _)) ->
(ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
-(ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _)) ->
+(ADDLconstmodify [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _)) ->
(ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
-(ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (ADDSD x (MOVQi2f y))
-(ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (ADDSS x (MOVLi2f y))
-(SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (SUBSD x (MOVQi2f y))
-(SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (SUBSS x (MOVLi2f y))
-(MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (MULSD x (MOVQi2f y))
-(MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (MULSS x (MOVLi2f y))
+(ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (ADDSD x (MOVQi2f y))
+(ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (ADDSS x (MOVLi2f y))
+(SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (SUBSD x (MOVQi2f y))
+(SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (SUBSS x (MOVLi2f y))
+(MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (MULSD x (MOVQi2f y))
+(MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (MULSS x (MOVLi2f y))
// Redirect stores to use the other register set.
(MOVQstore [off] {sym} ptr (MOVQf2i val) mem) -> (MOVSDstore [off] {sym} ptr val mem)
// Fold loads into compares
// Note: these may be undone by the flagalloc pass.
-(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (CMP(Q|L|W|B)mem {sym} [off] ptr x mem)
-(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (InvertFlags (CMP(Q|L|W|B)mem {sym} [off] ptr x mem))
+(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (CMP(Q|L|W|B)load {sym} [off] ptr x mem)
+(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem))
(CMP(Q|L|W|B)const l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) [c])
&& l.Uses == 1
&& validValAndOff(c, off)
&& clobber(l) ->
- @l.Block (CMP(Q|L|W|B)constmem {sym} [makeValAndOff(c,off)] ptr mem)
+ @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(c,off)] ptr mem)
-(CMPQmem {sym} [off] ptr (MOVQconst [c]) mem) && validValAndOff(c,off) -> (CMPQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
-(CMPLmem {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(c,off) -> (CMPLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
-(CMPWmem {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),off) -> (CMPWconstmem {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
-(CMPBmem {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),off) -> (CMPBconstmem {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
+(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validValAndOff(c,off) -> (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
+(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(c,off) -> (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),off) -> (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
+(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),off) -> (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
(TEST(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2)
&& l == l2
&& l.Uses == 2
&& validValAndOff(0,off)
&& clobber(l) ->
- @l.Block (CMP(Q|L|W|B)constmem {sym} [makeValAndOff(0,off)] ptr mem)
+ @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0,off)] ptr mem)
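// Illustrative sketch (hypothetical source): in
//
//	func eq7(p *int64) bool {
//		return *p == 7
//	}
//
// the CMPQconst of the loaded value matches the rule above and becomes
// (CMPQconstload {sym} [makeValAndOff(7,off)] ptr mem), a compare against
// memory with no separate load; the TEST variant handles *p == 0 the same
// way when the load feeds both TEST operands (l == l2, l.Uses == 2).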
{name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by i store
{name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by 8i store
- {name: "ADDSSmem", argLength: 3, reg: fp21load, asm: "ADDSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "ADDSDmem", argLength: 3, reg: fp21load, asm: "ADDSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "SUBSSmem", argLength: 3, reg: fp21load, asm: "SUBSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "SUBSDmem", argLength: 3, reg: fp21load, asm: "SUBSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "MULSSmem", argLength: 3, reg: fp21load, asm: "MULSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "MULSDmem", argLength: 3, reg: fp21load, asm: "MULSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ADDSSload", argLength: 3, reg: fp21load, asm: "ADDSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ADDSDload", argLength: 3, reg: fp21load, asm: "ADDSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBSSload", argLength: 3, reg: fp21load, asm: "SUBSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBSDload", argLength: 3, reg: fp21load, asm: "SUBSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULSSload", argLength: 3, reg: fp21load, asm: "MULSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULSDload", argLength: 3, reg: fp21load, asm: "MULSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
// binary ops
- {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1
- {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
- {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
- {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, // arg0 + auxint
- {name: "ADDQconstmem", argLength: 2, reg: gpstoreconst, asm: "ADDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
- {name: "ADDLconstmem", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
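	// Note: the *constmodify ops are read-modify-write, reading and then writing
	// *(arg0+ValAndOff(AuxInt).Off()+aux), hence symEffect "Read,Write" rather
	// than the plain "Write" the old *constmem names carried.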
{name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
{name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
{name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint
// compare *(arg0+auxint+aux) to arg1 (in that order). arg2=mem.
- {name: "CMPQmem", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
- {name: "CMPLmem", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
- {name: "CMPWmem", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
- {name: "CMPBmem", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPQload", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPLload", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWload", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBload", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
// compare *(arg0+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg1=mem.
- {name: "CMPQconstmem", argLength: 2, reg: gp0flagsLoad, asm: "CMPQ", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
- {name: "CMPLconstmem", argLength: 2, reg: gp0flagsLoad, asm: "CMPL", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
- {name: "CMPWconstmem", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
- {name: "CMPBconstmem", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPQconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPQ", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPLconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPL", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
{name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"}, // arg0 compare to arg1, f32
{name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"}, // arg0 compare to arg1, f64
{name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
{name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
- {name: "ADDLmem", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "ADDQmem", argLength: 3, reg: gp21load, asm: "ADDQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "SUBQmem", argLength: 3, reg: gp21load, asm: "SUBQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "SUBLmem", argLength: 3, reg: gp21load, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "ANDLmem", argLength: 3, reg: gp21load, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "ANDQmem", argLength: 3, reg: gp21load, asm: "ANDQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "ORQmem", argLength: 3, reg: gp21load, asm: "ORQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "ORLmem", argLength: 3, reg: gp21load, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "XORQmem", argLength: 3, reg: gp21load, asm: "XORQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
- {name: "XORLmem", argLength: 3, reg: gp21load, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ADDLload", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ADDQload", argLength: 3, reg: gp21load, asm: "ADDQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBQload", argLength: 3, reg: gp21load, asm: "SUBQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBLload", argLength: 3, reg: gp21load, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ANDLload", argLength: 3, reg: gp21load, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ANDQload", argLength: 3, reg: gp21load, asm: "ANDQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ORQload", argLength: 3, reg: gp21load, asm: "ORQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ORLload", argLength: 3, reg: gp21load, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "XORQload", argLength: 3, reg: gp21load, asm: "XORQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "XORLload", argLength: 3, reg: gp21load, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
// unary ops
{name: "NEGQ", argLength: 1, reg: gp11, asm: "NEGQ", resultInArg0: true, clobberFlags: true}, // -arg0
{name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0
{name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
// Variants that store result to memory
- {name: "SETEQmem", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETNEmem", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETLmem", argLength: 3, reg: gpstoreconst, asm: "SETLT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed < condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETLEmem", argLength: 3, reg: gpstoreconst, asm: "SETLE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed <= condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETGmem", argLength: 3, reg: gpstoreconst, asm: "SETGT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed > condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETGEmem", argLength: 3, reg: gpstoreconst, asm: "SETGE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed >= condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETBmem", argLength: 3, reg: gpstoreconst, asm: "SETCS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned < condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETBEmem", argLength: 3, reg: gpstoreconst, asm: "SETLS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned <= condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETAmem", argLength: 3, reg: gpstoreconst, asm: "SETHI", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned > condition from arg1 to arg0+auxint+aux, arg2=mem
- {name: "SETAEmem", argLength: 3, reg: gpstoreconst, asm: "SETCC", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned >= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETEQstore", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETNEstore", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETLstore", argLength: 3, reg: gpstoreconst, asm: "SETLT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed < condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETLEstore", argLength: 3, reg: gpstoreconst, asm: "SETLE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed <= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETGstore", argLength: 3, reg: gpstoreconst, asm: "SETGT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed > condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETGEstore", argLength: 3, reg: gpstoreconst, asm: "SETGE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed >= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETBstore", argLength: 3, reg: gpstoreconst, asm: "SETCS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned < condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETBEstore", argLength: 3, reg: gpstoreconst, asm: "SETLS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned <= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETAstore", argLength: 3, reg: gpstoreconst, asm: "SETHI", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned > condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETAEstore", argLength: 3, reg: gpstoreconst, asm: "SETCC", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned >= condition from arg1 to arg0+auxint+aux, arg2=mem
// Need different opcodes for floating point conditions because
// any comparison involving a NaN is always FALSE and thus
// the patterns for inverting conditions cannot be used.
return rewriteValue386_Op386ADDLcarry_0(v)
case Op386ADDLconst:
return rewriteValue386_Op386ADDLconst_0(v)
- case Op386ADDLmem:
- return rewriteValue386_Op386ADDLmem_0(v)
+ case Op386ADDLload:
+ return rewriteValue386_Op386ADDLload_0(v)
case Op386ADDLmodify:
return rewriteValue386_Op386ADDLmodify_0(v)
case Op386ADDSD:
return rewriteValue386_Op386ADDSD_0(v)
- case Op386ADDSDmem:
- return rewriteValue386_Op386ADDSDmem_0(v)
+ case Op386ADDSDload:
+ return rewriteValue386_Op386ADDSDload_0(v)
case Op386ADDSS:
return rewriteValue386_Op386ADDSS_0(v)
- case Op386ADDSSmem:
- return rewriteValue386_Op386ADDSSmem_0(v)
+ case Op386ADDSSload:
+ return rewriteValue386_Op386ADDSSload_0(v)
case Op386ANDL:
return rewriteValue386_Op386ANDL_0(v)
case Op386ANDLconst:
return rewriteValue386_Op386ANDLconst_0(v)
- case Op386ANDLmem:
- return rewriteValue386_Op386ANDLmem_0(v)
+ case Op386ANDLload:
+ return rewriteValue386_Op386ANDLload_0(v)
case Op386ANDLmodify:
return rewriteValue386_Op386ANDLmodify_0(v)
case Op386CMPB:
return rewriteValue386_Op386MULLconst_0(v) || rewriteValue386_Op386MULLconst_10(v) || rewriteValue386_Op386MULLconst_20(v) || rewriteValue386_Op386MULLconst_30(v)
case Op386MULSD:
return rewriteValue386_Op386MULSD_0(v)
- case Op386MULSDmem:
- return rewriteValue386_Op386MULSDmem_0(v)
+ case Op386MULSDload:
+ return rewriteValue386_Op386MULSDload_0(v)
case Op386MULSS:
return rewriteValue386_Op386MULSS_0(v)
- case Op386MULSSmem:
- return rewriteValue386_Op386MULSSmem_0(v)
+ case Op386MULSSload:
+ return rewriteValue386_Op386MULSSload_0(v)
case Op386NEGL:
return rewriteValue386_Op386NEGL_0(v)
case Op386NOTL:
return rewriteValue386_Op386ORL_0(v) || rewriteValue386_Op386ORL_10(v) || rewriteValue386_Op386ORL_20(v) || rewriteValue386_Op386ORL_30(v) || rewriteValue386_Op386ORL_40(v) || rewriteValue386_Op386ORL_50(v)
case Op386ORLconst:
return rewriteValue386_Op386ORLconst_0(v)
- case Op386ORLmem:
- return rewriteValue386_Op386ORLmem_0(v)
+ case Op386ORLload:
+ return rewriteValue386_Op386ORLload_0(v)
case Op386ORLmodify:
return rewriteValue386_Op386ORLmodify_0(v)
case Op386ROLBconst:
return rewriteValue386_Op386SUBLcarry_0(v)
case Op386SUBLconst:
return rewriteValue386_Op386SUBLconst_0(v)
- case Op386SUBLmem:
- return rewriteValue386_Op386SUBLmem_0(v)
+ case Op386SUBLload:
+ return rewriteValue386_Op386SUBLload_0(v)
case Op386SUBLmodify:
return rewriteValue386_Op386SUBLmodify_0(v)
case Op386SUBSD:
return rewriteValue386_Op386SUBSD_0(v)
- case Op386SUBSDmem:
- return rewriteValue386_Op386SUBSDmem_0(v)
+ case Op386SUBSDload:
+ return rewriteValue386_Op386SUBSDload_0(v)
case Op386SUBSS:
return rewriteValue386_Op386SUBSS_0(v)
- case Op386SUBSSmem:
- return rewriteValue386_Op386SUBSSmem_0(v)
+ case Op386SUBSSload:
+ return rewriteValue386_Op386SUBSSload_0(v)
case Op386XORL:
return rewriteValue386_Op386XORL_0(v) || rewriteValue386_Op386XORL_10(v)
case Op386XORLconst:
return rewriteValue386_Op386XORLconst_0(v)
- case Op386XORLmem:
- return rewriteValue386_Op386XORLmem_0(v)
+ case Op386XORLload:
+ return rewriteValue386_Op386XORLload_0(v)
case Op386XORLmodify:
return rewriteValue386_Op386XORLmodify_0(v)
case OpAdd16:
}
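// rewrite386.go is code generated from gen/386.rules by the rulegen tool, so
// the Op386*mem to Op386*load renames in the hunks below, including the
// match/result comments, mirror the rules-file renames mechanically.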
// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ADDLmem x [off] {sym} ptr mem)
+ // result: (ADDLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(Op386ADDLmem)
+ v.reset(Op386ADDLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ADDLmem x [off] {sym} ptr mem)
+ // result: (ADDLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(Op386ADDLmem)
+ v.reset(Op386ADDLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValue386_Op386ADDLmem_0(v *Value) bool {
+func rewriteValue386_Op386ADDLload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (ADDLmem [off1] {sym} val (ADDLconst [off2] base) mem)
+ // match: (ADDLload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ADDLmem [off1+off2] {sym} val base mem)
+ // result: (ADDLload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386ADDLmem)
+ v.reset(Op386ADDLload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ADDLmem [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // match: (ADDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ADDLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386ADDLmem)
+ v.reset(Op386ADDLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
_ = config
// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l)
- // result: (ADDSDmem x [off] {sym} ptr mem)
+ // result: (ADDSDload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) {
break
}
- v.reset(Op386ADDSDmem)
+ v.reset(Op386ADDSDload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l)
- // result: (ADDSDmem x [off] {sym} ptr mem)
+ // result: (ADDSDload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) {
break
}
- v.reset(Op386ADDSDmem)
+ v.reset(Op386ADDSDload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValue386_Op386ADDSDmem_0(v *Value) bool {
+func rewriteValue386_Op386ADDSDload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (ADDSDmem [off1] {sym} val (ADDLconst [off2] base) mem)
+ // match: (ADDSDload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ADDSDmem [off1+off2] {sym} val base mem)
+ // result: (ADDSDload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386ADDSDmem)
+ v.reset(Op386ADDSDload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ADDSDmem [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // match: (ADDSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ADDSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386ADDSDmem)
+ v.reset(Op386ADDSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
_ = config
// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l)
- // result: (ADDSSmem x [off] {sym} ptr mem)
+ // result: (ADDSSload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) {
break
}
- v.reset(Op386ADDSSmem)
+ v.reset(Op386ADDSSload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l)
- // result: (ADDSSmem x [off] {sym} ptr mem)
+ // result: (ADDSSload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) {
break
}
- v.reset(Op386ADDSSmem)
+ v.reset(Op386ADDSSload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValue386_Op386ADDSSmem_0(v *Value) bool {
+func rewriteValue386_Op386ADDSSload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (ADDSSmem [off1] {sym} val (ADDLconst [off2] base) mem)
+ // match: (ADDSSload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ADDSSmem [off1+off2] {sym} val base mem)
+ // result: (ADDSSload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386ADDSSmem)
+ v.reset(Op386ADDSSload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ADDSSmem [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // match: (ADDSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ADDSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386ADDSSmem)
+ v.reset(Op386ADDSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
}
// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ANDLmem x [off] {sym} ptr mem)
+ // result: (ANDLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(Op386ANDLmem)
+ v.reset(Op386ANDLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ANDLmem x [off] {sym} ptr mem)
+ // result: (ANDLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(Op386ANDLmem)
+ v.reset(Op386ANDLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValue386_Op386ANDLmem_0(v *Value) bool {
+func rewriteValue386_Op386ANDLload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (ANDLmem [off1] {sym} val (ADDLconst [off2] base) mem)
+ // match: (ANDLload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ANDLmem [off1+off2] {sym} val base mem)
+ // result: (ANDLload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386ANDLmem)
+ v.reset(Op386ANDLload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ANDLmem [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // match: (ANDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ANDLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386ANDLmem)
+ v.reset(Op386ANDLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
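
Each renamed load op also keeps the two address-folding rules seen above: an ADDLconst or LEAL feeding the address is absorbed into the op's displacement when is32Bit accepts the sum and, for LEAL, when the symbols merge and the base is not SB under -shared. A hedged sketch of the is32Bit guard, assuming it matches the helper these matchers call:

package main

import "fmt"

// is32Bit mirrors the guard used by the generated matchers: the
// combined displacement must round-trip through int32 to fit an
// x86 immediate. (A sketch of the rewrite helper, assumed here.)
func is32Bit(n int64) bool {
	return n == int64(int32(n))
}

func main() {
	off1, off2 := int64(8), int64(1<<30)
	if is32Bit(off1 + off2) {
		fmt.Println("fold offsets:", off1+off2)
	} else {
		fmt.Println("sum overflows int32; keep the LEAL separate")
	}
}
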
- // match: (MOVLstore {sym} [off] ptr y:(ADDLmem x [off] {sym} ptr mem) mem)
+ // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
// cond: y.Uses==1 && clobber(y)
// result: (ADDLmodify [off] {sym} ptr x mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != Op386ADDLmem {
+ if y.Op != Op386ADDLload {
break
}
if y.AuxInt != off {
v.AddArg(mem)
return true
}
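
With the loads renamed, the MOVLstore rule above and the three that follow read naturally: a store whose value is a one-use ADDLload/ANDLload/ORLload/XORLload from the same address fuses into the corresponding *modify op, a single read-modify-write instruction. A sketch of the source shape that triggers it (addTo is a hypothetical name):

package main

// addTo is the source shape the ADDLmodify fusion targets: a store
// whose value is a one-use ADDLload from the same address becomes a
// single read-modify-write ADDL $reg, (mem) instruction.
func addTo(p *int32, x int32) {
	*p += x
}

func main() {
	v := int32(40)
	addTo(&v, 2)
	println(v) // 42
}
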
- // match: (MOVLstore {sym} [off] ptr y:(ANDLmem x [off] {sym} ptr mem) mem)
+ // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
// cond: y.Uses==1 && clobber(y)
// result: (ANDLmodify [off] {sym} ptr x mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != Op386ANDLmem {
+ if y.Op != Op386ANDLload {
break
}
if y.AuxInt != off {
v.AddArg(mem)
return true
}
- // match: (MOVLstore {sym} [off] ptr y:(ORLmem x [off] {sym} ptr mem) mem)
+ // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
// cond: y.Uses==1 && clobber(y)
// result: (ORLmodify [off] {sym} ptr x mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != Op386ORLmem {
+ if y.Op != Op386ORLload {
break
}
if y.AuxInt != off {
v.AddArg(mem)
return true
}
- // match: (MOVLstore {sym} [off] ptr y:(XORLmem x [off] {sym} ptr mem) mem)
+ // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
// cond: y.Uses==1 && clobber(y)
// result: (XORLmodify [off] {sym} ptr x mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != Op386XORLmem {
+ if y.Op != Op386XORLload {
break
}
if y.AuxInt != off {
_ = config
// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l)
- // result: (MULSDmem x [off] {sym} ptr mem)
+ // result: (MULSDload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) {
break
}
- v.reset(Op386MULSDmem)
+ v.reset(Op386MULSDload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l)
- // result: (MULSDmem x [off] {sym} ptr mem)
+ // result: (MULSDload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) {
break
}
- v.reset(Op386MULSDmem)
+ v.reset(Op386MULSDload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValue386_Op386MULSDmem_0(v *Value) bool {
+func rewriteValue386_Op386MULSDload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (MULSDmem [off1] {sym} val (ADDLconst [off2] base) mem)
+ // match: (MULSDload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (MULSDmem [off1+off2] {sym} val base mem)
+ // result: (MULSDload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386MULSDmem)
+ v.reset(Op386MULSDload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (MULSDmem [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // match: (MULSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MULSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386MULSDmem)
+ v.reset(Op386MULSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
_ = config
// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l)
- // result: (MULSSmem x [off] {sym} ptr mem)
+ // result: (MULSSload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) {
break
}
- v.reset(Op386MULSSmem)
+ v.reset(Op386MULSSload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l)
- // result: (MULSSmem x [off] {sym} ptr mem)
+ // result: (MULSSload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) {
break
}
- v.reset(Op386MULSSmem)
+ v.reset(Op386MULSSload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValue386_Op386MULSSmem_0(v *Value) bool {
+func rewriteValue386_Op386MULSSload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (MULSSmem [off1] {sym} val (ADDLconst [off2] base) mem)
+ // match: (MULSSload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (MULSSmem [off1+off2] {sym} val base mem)
+ // result: (MULSSload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386MULSSmem)
+ v.reset(Op386MULSSload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (MULSSmem [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // match: (MULSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MULSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386MULSSmem)
+ v.reset(Op386MULSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
}
// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ORLmem x [off] {sym} ptr mem)
+ // result: (ORLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(Op386ORLmem)
+ v.reset(Op386ORLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ORL l:(MOVLload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ORLmem x [off] {sym} ptr mem)
+ // result: (ORLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(Op386ORLmem)
+ v.reset(Op386ORLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValue386_Op386ORLmem_0(v *Value) bool {
+func rewriteValue386_Op386ORLload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (ORLmem [off1] {sym} val (ADDLconst [off2] base) mem)
+ // match: (ORLload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ORLmem [off1+off2] {sym} val base mem)
+ // result: (ORLload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386ORLmem)
+ v.reset(Op386ORLload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ORLmem [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ORLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386ORLmem)
+ v.reset(Op386ORLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
}
// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (SUBLmem x [off] {sym} ptr mem)
+ // result: (SUBLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(Op386SUBLmem)
+ v.reset(Op386SUBLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
return true
}
}
-func rewriteValue386_Op386SUBLmem_0(v *Value) bool {
+func rewriteValue386_Op386SUBLload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (SUBLmem [off1] {sym} val (ADDLconst [off2] base) mem)
+ // match: (SUBLload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (SUBLmem [off1+off2] {sym} val base mem)
+ // result: (SUBLload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386SUBLmem)
+ v.reset(Op386SUBLload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SUBLmem [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // match: (SUBLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (SUBLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386SUBLmem)
+ v.reset(Op386SUBLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
_ = config
// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l)
- // result: (SUBSDmem x [off] {sym} ptr mem)
+ // result: (SUBSDload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) {
break
}
- v.reset(Op386SUBSDmem)
+ v.reset(Op386SUBSDload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValue386_Op386SUBSDmem_0(v *Value) bool {
+func rewriteValue386_Op386SUBSDload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (SUBSDmem [off1] {sym} val (ADDLconst [off2] base) mem)
+ // match: (SUBSDload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (SUBSDmem [off1+off2] {sym} val base mem)
+ // result: (SUBSDload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386SUBSDmem)
+ v.reset(Op386SUBSDload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SUBSDmem [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // match: (SUBSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (SUBSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386SUBSDmem)
+ v.reset(Op386SUBSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
_ = config
// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l)
- // result: (SUBSSmem x [off] {sym} ptr mem)
+ // result: (SUBSSload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) {
break
}
- v.reset(Op386SUBSSmem)
+ v.reset(Op386SUBSSload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValue386_Op386SUBSSmem_0(v *Value) bool {
+func rewriteValue386_Op386SUBSSload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (SUBSSmem [off1] {sym} val (ADDLconst [off2] base) mem)
+ // match: (SUBSSload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (SUBSSmem [off1+off2] {sym} val base mem)
+ // result: (SUBSSload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386SUBSSmem)
+ v.reset(Op386SUBSSload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SUBSSmem [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // match: (SUBSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (SUBSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386SUBSSmem)
+ v.reset(Op386SUBSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
}
// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (XORLmem x [off] {sym} ptr mem)
+ // result: (XORLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(Op386XORLmem)
+ v.reset(Op386XORLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (XORLmem x [off] {sym} ptr mem)
+ // result: (XORLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(Op386XORLmem)
+ v.reset(Op386XORLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValue386_Op386XORLmem_0(v *Value) bool {
+func rewriteValue386_Op386XORLload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (XORLmem [off1] {sym} val (ADDLconst [off2] base) mem)
+ // match: (XORLload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (XORLmem [off1+off2] {sym} val base mem)
+ // result: (XORLload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386XORLmem)
+ v.reset(Op386XORLload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (XORLmem [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // match: (XORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (XORLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386XORLmem)
+ v.reset(Op386XORLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v) || rewriteValueAMD64_OpAMD64ADDL_20(v)
case OpAMD64ADDLconst:
return rewriteValueAMD64_OpAMD64ADDLconst_0(v) || rewriteValueAMD64_OpAMD64ADDLconst_10(v)
- case OpAMD64ADDLconstmem:
- return rewriteValueAMD64_OpAMD64ADDLconstmem_0(v)
- case OpAMD64ADDLmem:
- return rewriteValueAMD64_OpAMD64ADDLmem_0(v)
+ case OpAMD64ADDLconstmodify:
+ return rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v)
+ case OpAMD64ADDLload:
+ return rewriteValueAMD64_OpAMD64ADDLload_0(v)
case OpAMD64ADDQ:
return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
case OpAMD64ADDQconst:
return rewriteValueAMD64_OpAMD64ADDQconst_0(v) || rewriteValueAMD64_OpAMD64ADDQconst_10(v)
- case OpAMD64ADDQconstmem:
- return rewriteValueAMD64_OpAMD64ADDQconstmem_0(v)
- case OpAMD64ADDQmem:
- return rewriteValueAMD64_OpAMD64ADDQmem_0(v)
+ case OpAMD64ADDQconstmodify:
+ return rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v)
+ case OpAMD64ADDQload:
+ return rewriteValueAMD64_OpAMD64ADDQload_0(v)
case OpAMD64ADDSD:
return rewriteValueAMD64_OpAMD64ADDSD_0(v)
- case OpAMD64ADDSDmem:
- return rewriteValueAMD64_OpAMD64ADDSDmem_0(v)
+ case OpAMD64ADDSDload:
+ return rewriteValueAMD64_OpAMD64ADDSDload_0(v)
case OpAMD64ADDSS:
return rewriteValueAMD64_OpAMD64ADDSS_0(v)
- case OpAMD64ADDSSmem:
- return rewriteValueAMD64_OpAMD64ADDSSmem_0(v)
+ case OpAMD64ADDSSload:
+ return rewriteValueAMD64_OpAMD64ADDSSload_0(v)
case OpAMD64ANDL:
return rewriteValueAMD64_OpAMD64ANDL_0(v)
case OpAMD64ANDLconst:
return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
- case OpAMD64ANDLmem:
- return rewriteValueAMD64_OpAMD64ANDLmem_0(v)
+ case OpAMD64ANDLload:
+ return rewriteValueAMD64_OpAMD64ANDLload_0(v)
case OpAMD64ANDQ:
return rewriteValueAMD64_OpAMD64ANDQ_0(v)
case OpAMD64ANDQconst:
return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
- case OpAMD64ANDQmem:
- return rewriteValueAMD64_OpAMD64ANDQmem_0(v)
+ case OpAMD64ANDQload:
+ return rewriteValueAMD64_OpAMD64ANDQload_0(v)
case OpAMD64BSFQ:
return rewriteValueAMD64_OpAMD64BSFQ_0(v)
case OpAMD64BTLconst:
return rewriteValueAMD64_OpAMD64CMPB_0(v)
case OpAMD64CMPBconst:
return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
- case OpAMD64CMPBmem:
- return rewriteValueAMD64_OpAMD64CMPBmem_0(v)
+ case OpAMD64CMPBload:
+ return rewriteValueAMD64_OpAMD64CMPBload_0(v)
case OpAMD64CMPL:
return rewriteValueAMD64_OpAMD64CMPL_0(v)
case OpAMD64CMPLconst:
return rewriteValueAMD64_OpAMD64CMPLconst_0(v) || rewriteValueAMD64_OpAMD64CMPLconst_10(v)
- case OpAMD64CMPLmem:
- return rewriteValueAMD64_OpAMD64CMPLmem_0(v)
+ case OpAMD64CMPLload:
+ return rewriteValueAMD64_OpAMD64CMPLload_0(v)
case OpAMD64CMPQ:
return rewriteValueAMD64_OpAMD64CMPQ_0(v)
case OpAMD64CMPQconst:
return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
- case OpAMD64CMPQmem:
- return rewriteValueAMD64_OpAMD64CMPQmem_0(v)
+ case OpAMD64CMPQload:
+ return rewriteValueAMD64_OpAMD64CMPQload_0(v)
case OpAMD64CMPW:
return rewriteValueAMD64_OpAMD64CMPW_0(v)
case OpAMD64CMPWconst:
return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
- case OpAMD64CMPWmem:
- return rewriteValueAMD64_OpAMD64CMPWmem_0(v)
+ case OpAMD64CMPWload:
+ return rewriteValueAMD64_OpAMD64CMPWload_0(v)
case OpAMD64CMPXCHGLlock:
return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
case OpAMD64CMPXCHGQlock:
return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v) || rewriteValueAMD64_OpAMD64MULQconst_30(v)
case OpAMD64MULSD:
return rewriteValueAMD64_OpAMD64MULSD_0(v)
- case OpAMD64MULSDmem:
- return rewriteValueAMD64_OpAMD64MULSDmem_0(v)
+ case OpAMD64MULSDload:
+ return rewriteValueAMD64_OpAMD64MULSDload_0(v)
case OpAMD64MULSS:
return rewriteValueAMD64_OpAMD64MULSS_0(v)
- case OpAMD64MULSSmem:
- return rewriteValueAMD64_OpAMD64MULSSmem_0(v)
+ case OpAMD64MULSSload:
+ return rewriteValueAMD64_OpAMD64MULSSload_0(v)
case OpAMD64NEGL:
return rewriteValueAMD64_OpAMD64NEGL_0(v)
case OpAMD64NEGQ:
return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
case OpAMD64ORLconst:
return rewriteValueAMD64_OpAMD64ORLconst_0(v)
- case OpAMD64ORLmem:
- return rewriteValueAMD64_OpAMD64ORLmem_0(v)
+ case OpAMD64ORLload:
+ return rewriteValueAMD64_OpAMD64ORLload_0(v)
case OpAMD64ORQ:
return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
case OpAMD64ORQconst:
return rewriteValueAMD64_OpAMD64ORQconst_0(v)
- case OpAMD64ORQmem:
- return rewriteValueAMD64_OpAMD64ORQmem_0(v)
+ case OpAMD64ORQload:
+ return rewriteValueAMD64_OpAMD64ORQload_0(v)
case OpAMD64ROLB:
return rewriteValueAMD64_OpAMD64ROLB_0(v)
case OpAMD64ROLBconst:
return rewriteValueAMD64_OpAMD64SETA_0(v)
case OpAMD64SETAE:
return rewriteValueAMD64_OpAMD64SETAE_0(v)
- case OpAMD64SETAEmem:
- return rewriteValueAMD64_OpAMD64SETAEmem_0(v)
- case OpAMD64SETAmem:
- return rewriteValueAMD64_OpAMD64SETAmem_0(v)
+ case OpAMD64SETAEstore:
+ return rewriteValueAMD64_OpAMD64SETAEstore_0(v)
+ case OpAMD64SETAstore:
+ return rewriteValueAMD64_OpAMD64SETAstore_0(v)
case OpAMD64SETB:
return rewriteValueAMD64_OpAMD64SETB_0(v)
case OpAMD64SETBE:
return rewriteValueAMD64_OpAMD64SETBE_0(v)
- case OpAMD64SETBEmem:
- return rewriteValueAMD64_OpAMD64SETBEmem_0(v)
- case OpAMD64SETBmem:
- return rewriteValueAMD64_OpAMD64SETBmem_0(v)
+ case OpAMD64SETBEstore:
+ return rewriteValueAMD64_OpAMD64SETBEstore_0(v)
+ case OpAMD64SETBstore:
+ return rewriteValueAMD64_OpAMD64SETBstore_0(v)
case OpAMD64SETEQ:
return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v) || rewriteValueAMD64_OpAMD64SETEQ_20(v)
- case OpAMD64SETEQmem:
- return rewriteValueAMD64_OpAMD64SETEQmem_0(v) || rewriteValueAMD64_OpAMD64SETEQmem_10(v) || rewriteValueAMD64_OpAMD64SETEQmem_20(v)
+ case OpAMD64SETEQstore:
+ return rewriteValueAMD64_OpAMD64SETEQstore_0(v) || rewriteValueAMD64_OpAMD64SETEQstore_10(v) || rewriteValueAMD64_OpAMD64SETEQstore_20(v)
case OpAMD64SETG:
return rewriteValueAMD64_OpAMD64SETG_0(v)
case OpAMD64SETGE:
return rewriteValueAMD64_OpAMD64SETGE_0(v)
- case OpAMD64SETGEmem:
- return rewriteValueAMD64_OpAMD64SETGEmem_0(v)
- case OpAMD64SETGmem:
- return rewriteValueAMD64_OpAMD64SETGmem_0(v)
+ case OpAMD64SETGEstore:
+ return rewriteValueAMD64_OpAMD64SETGEstore_0(v)
+ case OpAMD64SETGstore:
+ return rewriteValueAMD64_OpAMD64SETGstore_0(v)
case OpAMD64SETL:
return rewriteValueAMD64_OpAMD64SETL_0(v)
case OpAMD64SETLE:
return rewriteValueAMD64_OpAMD64SETLE_0(v)
- case OpAMD64SETLEmem:
- return rewriteValueAMD64_OpAMD64SETLEmem_0(v)
- case OpAMD64SETLmem:
- return rewriteValueAMD64_OpAMD64SETLmem_0(v)
+ case OpAMD64SETLEstore:
+ return rewriteValueAMD64_OpAMD64SETLEstore_0(v)
+ case OpAMD64SETLstore:
+ return rewriteValueAMD64_OpAMD64SETLstore_0(v)
case OpAMD64SETNE:
return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v) || rewriteValueAMD64_OpAMD64SETNE_20(v)
- case OpAMD64SETNEmem:
- return rewriteValueAMD64_OpAMD64SETNEmem_0(v) || rewriteValueAMD64_OpAMD64SETNEmem_10(v) || rewriteValueAMD64_OpAMD64SETNEmem_20(v)
+ case OpAMD64SETNEstore:
+ return rewriteValueAMD64_OpAMD64SETNEstore_0(v) || rewriteValueAMD64_OpAMD64SETNEstore_10(v) || rewriteValueAMD64_OpAMD64SETNEstore_20(v)
case OpAMD64SHLL:
return rewriteValueAMD64_OpAMD64SHLL_0(v)
case OpAMD64SHLLconst:
return rewriteValueAMD64_OpAMD64SUBL_0(v)
case OpAMD64SUBLconst:
return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
- case OpAMD64SUBLmem:
- return rewriteValueAMD64_OpAMD64SUBLmem_0(v)
+ case OpAMD64SUBLload:
+ return rewriteValueAMD64_OpAMD64SUBLload_0(v)
case OpAMD64SUBQ:
return rewriteValueAMD64_OpAMD64SUBQ_0(v)
case OpAMD64SUBQconst:
return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
- case OpAMD64SUBQmem:
- return rewriteValueAMD64_OpAMD64SUBQmem_0(v)
+ case OpAMD64SUBQload:
+ return rewriteValueAMD64_OpAMD64SUBQload_0(v)
case OpAMD64SUBSD:
return rewriteValueAMD64_OpAMD64SUBSD_0(v)
- case OpAMD64SUBSDmem:
- return rewriteValueAMD64_OpAMD64SUBSDmem_0(v)
+ case OpAMD64SUBSDload:
+ return rewriteValueAMD64_OpAMD64SUBSDload_0(v)
case OpAMD64SUBSS:
return rewriteValueAMD64_OpAMD64SUBSS_0(v)
- case OpAMD64SUBSSmem:
- return rewriteValueAMD64_OpAMD64SUBSSmem_0(v)
+ case OpAMD64SUBSSload:
+ return rewriteValueAMD64_OpAMD64SUBSSload_0(v)
case OpAMD64TESTB:
return rewriteValueAMD64_OpAMD64TESTB_0(v)
case OpAMD64TESTBconst:
return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
case OpAMD64XORLconst:
return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
- case OpAMD64XORLmem:
- return rewriteValueAMD64_OpAMD64XORLmem_0(v)
+ case OpAMD64XORLload:
+ return rewriteValueAMD64_OpAMD64XORLload_0(v)
case OpAMD64XORQ:
return rewriteValueAMD64_OpAMD64XORQ_0(v) || rewriteValueAMD64_OpAMD64XORQ_10(v)
case OpAMD64XORQconst:
return rewriteValueAMD64_OpAMD64XORQconst_0(v)
- case OpAMD64XORQmem:
- return rewriteValueAMD64_OpAMD64XORQmem_0(v)
+ case OpAMD64XORQload:
+ return rewriteValueAMD64_OpAMD64XORQload_0(v)
case OpAdd16:
return rewriteValueAMD64_OpAdd16_0(v)
case OpAdd32:
}
// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ADDLmem x [off] {sym} ptr mem)
+ // result: (ADDLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ADDLmem)
+ v.reset(OpAMD64ADDLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ADDLmem x [off] {sym} ptr mem)
+ // result: (ADDLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ADDLmem)
+ v.reset(OpAMD64ADDLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDLconstmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (ADDLconstmem [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd(off2)
- // result: (ADDLconstmem [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ // result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
for {
valoff1 := v.AuxInt
sym := v.Aux
if !(ValAndOff(valoff1).canAdd(off2)) {
break
}
- v.reset(OpAMD64ADDLconstmem)
+ v.reset(OpAMD64ADDLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (ADDLconstmem [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (ADDLconstmem [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
for {
valoff1 := v.AuxInt
sym1 := v.Aux
if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64ADDLconstmem)
+ v.reset(OpAMD64ADDLconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _))
+ // match: (ADDLconstmodify [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _))
// cond:
// result: (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
for {
}
return false
}
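
The constmodify matchers above juggle ValAndOff, which packs the 32-bit immediate and the 32-bit displacement into a single AuxInt so one rule can adjust both at once. A self-contained sketch of that packing, assuming the usual high-half/low-half layout of the real compiler type:

package main

import "fmt"

// ValAndOff packs a 32-bit value and a 32-bit offset into one int64
// AuxInt: value in the high half, offset (sign-extended) in the low
// half. This is a sketch of the layout the constmodify ops rely on.
type ValAndOff int64

func makeValAndOff(val, off int64) ValAndOff {
	return ValAndOff(val<<32 | off&0xffffffff)
}

func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
func (x ValAndOff) Off() int64 { return int64(int32(x)) }

// canAdd reports whether the offset half can absorb another
// displacement without overflowing int32, the check the ADDQconst
// folding rule performs before calling add.
func (x ValAndOff) canAdd(off int64) bool {
	n := x.Off() + off
	return n == int64(int32(n))
}

func main() {
	vo := makeValAndOff(7, 16)
	fmt.Println(vo.Val(), vo.Off(), vo.canAdd(8)) // 7 16 true
}
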
-func rewriteValueAMD64_OpAMD64ADDLmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64ADDLload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (ADDLmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ADDLmem [off1+off2] {sym} val base mem)
+ // result: (ADDLload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64ADDLmem)
+ v.reset(OpAMD64ADDLload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ADDLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ADDLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64ADDLmem)
+ v.reset(OpAMD64ADDLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond:
// result: (ADDL x (MOVLf2i y))
for {
}
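
The rule just above is the odd one out among the load rules: when the longword being loaded is exactly the float just stored at the same address, the load is dropped and the stored value's bits are reinterpreted in-register via MOVLf2i (MOVQf2i for the 64-bit ops). A rough source-level analogue, using math.Float64bits for the reinterpretation (an analogy only, not the compiler's mechanism):

package main

import (
	"fmt"
	"math"
)

// Rather than storing a float and immediately loading it back as an
// integer, the rewrite reinterprets the bits in-register.
// math.Float64bits plays the role of MOVQf2i in source terms.
func main() {
	y := 1.5
	x := uint64(10)
	fmt.Println(x + math.Float64bits(y))
}
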
// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ADDQmem x [off] {sym} ptr mem)
+ // result: (ADDQload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ADDQmem)
+ v.reset(OpAMD64ADDQload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ADDQmem x [off] {sym} ptr mem)
+ // result: (ADDQload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ADDQmem)
+ v.reset(OpAMD64ADDQload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDQconstmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (ADDQconstmem [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd(off2)
- // result: (ADDQconstmem [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ // result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
for {
valoff1 := v.AuxInt
sym := v.Aux
if !(ValAndOff(valoff1).canAdd(off2)) {
break
}
- v.reset(OpAMD64ADDQconstmem)
+ v.reset(OpAMD64ADDQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (ADDQconstmem [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (ADDQconstmem [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
for {
valoff1 := v.AuxInt
sym1 := v.Aux
if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64ADDQconstmem)
+ v.reset(OpAMD64ADDQconstmodify)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _))
+ // match: (ADDQconstmodify [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _))
// cond:
// result: (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDQmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64ADDQload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (ADDQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ADDQmem [off1+off2] {sym} val base mem)
+ // result: (ADDQload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64ADDQmem)
+ v.reset(OpAMD64ADDQload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ADDQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ADDQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64ADDQmem)
+ v.reset(OpAMD64ADDQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond:
// result: (ADDQ x (MOVQf2i y))
for {
func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ADDSDmem x [off] {sym} ptr mem)
+ // result: (ADDSDload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ADDSDmem)
+ v.reset(OpAMD64ADDSDload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ADDSDmem x [off] {sym} ptr mem)
+ // result: (ADDSDload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ADDSDmem)
+ v.reset(OpAMD64ADDSDload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDSDmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (ADDSDmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ADDSDmem [off1+off2] {sym} val base mem)
+ // result: (ADDSDload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64ADDSDmem)
+ v.reset(OpAMD64ADDSDload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ADDSDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ADDSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64ADDSDmem)
+ v.reset(OpAMD64ADDSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
+ // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// cond:
// result: (ADDSD x (MOVQi2f y))
for {
func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ADDSSmem x [off] {sym} ptr mem)
+ // result: (ADDSSload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ADDSSmem)
+ v.reset(OpAMD64ADDSSload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ADDSSmem x [off] {sym} ptr mem)
+ // result: (ADDSSload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ADDSSmem)
+ v.reset(OpAMD64ADDSSload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDSSmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (ADDSSmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ADDSSmem [off1+off2] {sym} val base mem)
+ // result: (ADDSSload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64ADDSSmem)
+ v.reset(OpAMD64ADDSSload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ADDSSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ADDSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64ADDSSmem)
+ v.reset(OpAMD64ADDSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
+ // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// cond:
// result: (ADDSS x (MOVLi2f y))
for {
}
// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ANDLmem x [off] {sym} ptr mem)
+ // result: (ANDLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ANDLmem)
+ v.reset(OpAMD64ANDLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ANDLmem x [off] {sym} ptr mem)
+ // result: (ANDLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ANDLmem)
+ v.reset(OpAMD64ANDLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ANDLmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64ANDLload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (ANDLmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ANDLmem [off1+off2] {sym} val base mem)
+ // result: (ANDLload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64ANDLmem)
+ v.reset(OpAMD64ANDLload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ANDLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ANDLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64ANDLmem)
+ v.reset(OpAMD64ANDLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond:
// result: (ANDL x (MOVLf2i y))
for {
}
// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ANDQmem x [off] {sym} ptr mem)
+ // result: (ANDQload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ANDQmem)
+ v.reset(OpAMD64ANDQload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ANDQmem x [off] {sym} ptr mem)
+ // result: (ANDQload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ANDQmem)
+ v.reset(OpAMD64ANDQload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ANDQmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64ANDQload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (ANDQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ANDQmem [off1+off2] {sym} val base mem)
+ // result: (ANDQload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64ANDQmem)
+ v.reset(OpAMD64ANDQload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ANDQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ANDQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64ANDQmem)
+ v.reset(OpAMD64ANDQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond:
// result: (ANDQ x (MOVQf2i y))
for {
}
// match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (CMPBmem {sym} [off] ptr x mem)
+ // result: (CMPBload {sym} [off] ptr x mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64CMPBmem)
+ v.reset(OpAMD64CMPBload)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (InvertFlags (CMPBmem {sym} [off] ptr x mem))
+ // result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
for {
_ = v.Args[1]
x := v.Args[0]
break
}
v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPBmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBload, types.TypeFlags)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
}
// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
- // result: @l.Block (CMPBconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff(c,off)] ptr mem)
for {
c := v.AuxInt
l := v.Args[0]
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstload, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(c, off)
}
return false
}
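
The CMPB and CMPBconst rules above follow the same scheme as the arithmetic ops, with one extra step: comparing a loaded byte against a constant collapses further to CMPBconstload, the constant truncated to operand width (int64(int8(c))) and packed next to the offset via makeValAndOff. The source shape, with a hypothetical function name:

package main

// cmp is the shape the CMPBconstload rule targets: a one-use byte
// load compared against a small constant becomes a single
// CMPB $7, (mem) instead of a load plus a register compare.
func cmp(p *int8) bool {
	return *p == 7
}

func main() {
	v := int8(7)
	println(cmp(&v)) // true
}
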
-func rewriteValueAMD64_OpAMD64CMPBmem_0(v *Value) bool {
- // match: (CMPBmem {sym} [off] ptr (MOVLconst [c]) mem)
+func rewriteValueAMD64_OpAMD64CMPBload_0(v *Value) bool {
+ // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
// cond: validValAndOff(int64(int8(c)),off)
- // result: (CMPBconstmem {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
+ // result: (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
for {
off := v.AuxInt
sym := v.Aux
if !(validValAndOff(int64(int8(c)), off)) {
break
}
- v.reset(OpAMD64CMPBconstmem)
+ v.reset(OpAMD64CMPBconstload)
v.AuxInt = makeValAndOff(int64(int8(c)), off)
v.Aux = sym
v.AddArg(ptr)
}
// match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (CMPLmem {sym} [off] ptr x mem)
+ // result: (CMPLload {sym} [off] ptr x mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64CMPLmem)
+ v.reset(OpAMD64CMPLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (InvertFlags (CMPLmem {sym} [off] ptr x mem))
+ // result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
for {
_ = v.Args[1]
x := v.Args[0]
break
}
v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPLmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLload, types.TypeFlags)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
_ = b
// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
- // result: @l.Block (CMPLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
for {
c := v.AuxInt
l := v.Args[0]
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstload, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(c, off)
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPLmem_0(v *Value) bool {
- // match: (CMPLmem {sym} [off] ptr (MOVLconst [c]) mem)
+func rewriteValueAMD64_OpAMD64CMPLload_0(v *Value) bool {
+ // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
// cond: validValAndOff(c,off)
- // result: (CMPLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
sym := v.Aux
if !(validValAndOff(c, off)) {
break
}
- v.reset(OpAMD64CMPLconstmem)
+ v.reset(OpAMD64CMPLconstload)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
v.AddArg(ptr)
}
// match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (CMPQmem {sym} [off] ptr x mem)
+ // result: (CMPQload {sym} [off] ptr x mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64CMPQmem)
+ v.reset(OpAMD64CMPQload)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (InvertFlags (CMPQmem {sym} [off] ptr x mem))
+ // result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
for {
_ = v.Args[1]
x := v.Args[0]
break
}
v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQload, types.TypeFlags)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
}
// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
- // result: @l.Block (CMPQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ // result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
for {
c := v.AuxInt
l := v.Args[0]
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstload, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(c, off)
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPQmem_0(v *Value) bool {
- // match: (CMPQmem {sym} [off] ptr (MOVQconst [c]) mem)
+func rewriteValueAMD64_OpAMD64CMPQload_0(v *Value) bool {
+ // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
// cond: validValAndOff(c,off)
- // result: (CMPQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ // result: (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
sym := v.Aux
if !(validValAndOff(c, off)) {
break
}
- v.reset(OpAMD64CMPQconstmem)
+ v.reset(OpAMD64CMPQconstload)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
v.AddArg(ptr)
}
// match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (CMPWmem {sym} [off] ptr x mem)
+ // result: (CMPWload {sym} [off] ptr x mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64CMPWmem)
+ v.reset(OpAMD64CMPWload)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (InvertFlags (CMPWmem {sym} [off] ptr x mem))
+ // result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
for {
_ = v.Args[1]
x := v.Args[0]
break
}
v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPWmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWload, types.TypeFlags)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
}
// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
- // result: @l.Block (CMPWconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff(c,off)] ptr mem)
for {
c := v.AuxInt
l := v.Args[0]
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstload, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(c, off)
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPWmem_0(v *Value) bool {
- // match: (CMPWmem {sym} [off] ptr (MOVLconst [c]) mem)
+func rewriteValueAMD64_OpAMD64CMPWload_0(v *Value) bool {
+ // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
// cond: validValAndOff(int64(int16(c)),off)
- // result: (CMPWconstmem {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
+ // result: (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
for {
off := v.AuxInt
sym := v.Aux
if !(validValAndOff(int64(int16(c)), off)) {
break
}
- v.reset(OpAMD64CMPWconstmem)
+ v.reset(OpAMD64CMPWconstload)
v.AuxInt = makeValAndOff(int64(int16(c)), off)
v.Aux = sym
v.AddArg(ptr)
func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool {
// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
// cond: y.Uses == 1
- // result: (SETLmem [off] {sym} ptr x mem)
+ // result: (SETLstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64SETLmem)
+ v.reset(OpAMD64SETLstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
// cond: y.Uses == 1
- // result: (SETLEmem [off] {sym} ptr x mem)
+ // result: (SETLEstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64SETLEmem)
+ v.reset(OpAMD64SETLEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
// cond: y.Uses == 1
- // result: (SETGmem [off] {sym} ptr x mem)
+ // result: (SETGstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64SETGmem)
+ v.reset(OpAMD64SETGstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
// cond: y.Uses == 1
- // result: (SETGEmem [off] {sym} ptr x mem)
+ // result: (SETGEstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64SETGEmem)
+ v.reset(OpAMD64SETGEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
// cond: y.Uses == 1
- // result: (SETEQmem [off] {sym} ptr x mem)
+ // result: (SETEQstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64SETEQmem)
+ v.reset(OpAMD64SETEQstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
// cond: y.Uses == 1
- // result: (SETNEmem [off] {sym} ptr x mem)
+ // result: (SETNEstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64SETNEmem)
+ v.reset(OpAMD64SETNEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
// cond: y.Uses == 1
- // result: (SETBmem [off] {sym} ptr x mem)
+ // result: (SETBstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
// cond: y.Uses == 1
- // result: (SETBEmem [off] {sym} ptr x mem)
+ // result: (SETBEstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64SETBEmem)
+ v.reset(OpAMD64SETBEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
// cond: y.Uses == 1
- // result: (SETAmem [off] {sym} ptr x mem)
+ // result: (SETAstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64SETAmem)
+ v.reset(OpAMD64SETAstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
// cond: y.Uses == 1
- // result: (SETAEmem [off] {sym} ptr x mem)
+ // result: (SETAEstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
- // result: (ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ // result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
sym := v.Aux
if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
break
}
- v.reset(OpAMD64ADDLconstmem)
+ v.reset(OpAMD64ADDLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
v.AddArg(ptr)
}
// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
- // result: (ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ // result: (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
sym := v.Aux
if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
break
}
- v.reset(OpAMD64ADDQconstmem)
+ v.reset(OpAMD64ADDQconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
v.AddArg(ptr)
func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool {
// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (MULSDmem x [off] {sym} ptr mem)
+ // result: (MULSDload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64MULSDmem)
+ v.reset(OpAMD64MULSDload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (MULSDmem x [off] {sym} ptr mem)
+ // result: (MULSDload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64MULSDmem)
+ v.reset(OpAMD64MULSDload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
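// The two MULSD rules above fold a single-use MOVSDload into the multiply's
// memory operand (SSE MULSD accepts a memory source); canMergeLoad checks the
// fold is safe and clobber(l) marks the now-dead load for removal. In source
// terms, a function like the one below can compile to one MULSD with a memory
// operand instead of a separate load -- a plausible illustration, not a
// guaranteed codegen result:
func sketchMulThroughPointer(a float64, p *float64) float64 {
	return a * *p // the load of *p can merge into the multiply
}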
-func rewriteValueAMD64_OpAMD64MULSDmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (MULSDmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (MULSDmem [off1+off2] {sym} val base mem)
+ // result: (MULSDload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64MULSDmem)
+ v.reset(OpAMD64MULSDload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (MULSDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MULSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64MULSDmem)
+ v.reset(OpAMD64MULSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
+ // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// cond:
// result: (MULSD x (MOVQi2f y))
for {
func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool {
// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (MULSSmem x [off] {sym} ptr mem)
+ // result: (MULSSload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64MULSSmem)
+ v.reset(OpAMD64MULSSload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (MULSSmem x [off] {sym} ptr mem)
+ // result: (MULSSload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64MULSSmem)
+ v.reset(OpAMD64MULSSload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
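// The final MULSDload/MULSSload rules forward a value that was just stored to
// the same address: rather than storing an integer and immediately loading it
// back as a float, MOVQi2f/MOVLi2f reinterpret the register bits directly.
// The bit-level equivalence being exploited matches math.Float64frombits; a
// sketch (illustrative, and it assumes "math" is imported in this file):
func sketchForwardStoredBits(bits uint64) float64 {
	// storing bits and MOVSD-loading the same slot is equivalent to
	// reinterpreting the register in place:
	return math.Float64frombits(bits)
}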
-func rewriteValueAMD64_OpAMD64MULSSmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64MULSSload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (MULSSmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (MULSSmem [off1+off2] {sym} val base mem)
+ // result: (MULSSload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64MULSSmem)
+ v.reset(OpAMD64MULSSload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (MULSSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MULSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64MULSSmem)
+ v.reset(OpAMD64MULSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
+ // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// cond:
// result: (MULSS x (MOVLi2f y))
for {
}
// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ORLmem x [off] {sym} ptr mem)
+ // result: (ORLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ORLmem)
+ v.reset(OpAMD64ORLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ORL l:(MOVLload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ORLmem x [off] {sym} ptr mem)
+ // result: (ORLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ORLmem)
+ v.reset(OpAMD64ORLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
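// Commutative ops get two near-identical rules above -- one with the load on
// each side -- because the generated matcher checks argument positions
// literally. Both arms funnel into the same ORLload, whose operand order is
// fixed (value first, address second). A sketch of the shape such a matcher
// takes, with illustrative names:
func sketchMatchCommutativeLoad(arg0, arg1 *Value, isLoad func(*Value) bool) (load, other *Value, ok bool) {
	if isLoad(arg0) {
		return arg0, arg1, true
	}
	if isLoad(arg1) {
		return arg1, arg0, true
	}
	return nil, nil, false
}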
-func rewriteValueAMD64_OpAMD64ORLmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64ORLload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (ORLmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ORLmem [off1+off2] {sym} val base mem)
+ // result: (ORLload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64ORLmem)
+ v.reset(OpAMD64ORLload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ORLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ORLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64ORLmem)
+ v.reset(OpAMD64ORLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // match: (ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))

// cond:
// result: (ORL x (MOVLf2i y))
for {
}
// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ORQmem x [off] {sym} ptr mem)
+ // result: (ORQload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ORQmem)
+ v.reset(OpAMD64ORQload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (ORQ l:(MOVQload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (ORQmem x [off] {sym} ptr mem)
+ // result: (ORQload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64ORQmem)
+ v.reset(OpAMD64ORQload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
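// The ADDQconst and LEAQ rules above fold address arithmetic into the op's
// AuxInt/Aux: a constant added to the base merges into the offset as long as
// the sum still fits in 32 bits (x86-64 displacements are signed 32-bit), and
// a LEAQ's symbol can merge when at most one side carries a symbol. A minimal
// sketch of those guards; the sketch* names are illustrative, not the real
// helpers:
func sketchIs32Bit(n int64) bool { return n == int64(int32(n)) }

func sketchCanMergeSym(a, b interface{}) bool { return a == nil || b == nil }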
-func rewriteValueAMD64_OpAMD64ORQmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64ORQload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (ORQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ORQmem [off1+off2] {sym} val base mem)
+ // result: (ORQload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64ORQmem)
+ v.reset(OpAMD64ORQload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ORQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ORQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64ORQmem)
+ v.reset(OpAMD64ORQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond:
// result: (ORQ x (MOVQf2i y))
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64SETAEmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETAEmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETBEmem [off] {sym} ptr x mem)
+ // result: (SETBEstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETBEmem)
+ v.reset(OpAMD64SETBEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETAEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETAEmem [off1+off2] {sym} base val mem)
+ // result: (SETAEstore [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETAEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETAEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETAEmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETAEstore [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETAEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETAEstore [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETAEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETAEstore [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETAEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETAEstore [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETAEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETAEstore [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
}
return false
}
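// SETAEstore's first rule above consumes an InvertFlags: InvertFlags marks
// flags that came from a comparison with swapped operands, so each consumer
// flips its condition -- A<->B, AE<->BE, and (further down) L<->G, LE<->GE --
// while EQ and NE pass through unchanged. A sketch of the flip table, using
// illustrative string names rather than real opcodes:
var sketchInvertCond = map[string]string{
	"A": "B", "AE": "BE", "B": "A", "BE": "AE",
	"L": "G", "LE": "GE", "G": "L", "GE": "LE",
	"EQ": "EQ", "NE": "NE",
}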
-func rewriteValueAMD64_OpAMD64SETAmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETAmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETBmem [off] {sym} ptr x mem)
+ // result: (SETBstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETAmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETAmem [off1+off2] {sym} base val mem)
+ // result: (SETAstore [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETAmem)
+ v.reset(OpAMD64SETAstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETAmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETAmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETAmem)
+ v.reset(OpAMD64SETAstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETAmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETAstore [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETAmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETAstore [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETAmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETAstore [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETAmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETAstore [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETAmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETAstore [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
}
return false
}
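// The Flag* rules above constant-fold a SETcc whose input is statically
// known: FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT, and FlagGT_UGT each
// encode a fixed comparison outcome (signed order, then unsigned order), so
// the SETccstore collapses to a MOVBstore of 0 or 1. For SETAstore
// ("unsigned above") only the _UGT states yield 1, matching the rules just
// above; a sketch with illustrative names:
func sketchSETA(unsignedGreater bool) int64 {
	if unsignedGreater { // FlagLT_UGT or FlagGT_UGT
		return 1
	}
	return 0 // FlagEQ, FlagLT_ULT, FlagGT_ULT
}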
-func rewriteValueAMD64_OpAMD64SETBEmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETBEmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETAEmem [off] {sym} ptr x mem)
+ // result: (SETAEstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETBEmem [off1+off2] {sym} base val mem)
+ // result: (SETBEstore [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETBEmem)
+ v.reset(OpAMD64SETBEstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETBEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETBEmem)
+ v.reset(OpAMD64SETBEstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETBEstore [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETBEstore [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETBEstore [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETBEstore [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETBEstore [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64SETBmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETBmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETAmem [off] {sym} ptr x mem)
+ // result: (SETAstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETAmem)
+ v.reset(OpAMD64SETAstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETBmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETBmem [off1+off2] {sym} base val mem)
+ // result: (SETBstore [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETBmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETBmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETBmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETBstore [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETBmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETBstore [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETBmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETBstore [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETBmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETBstore [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETBmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETBstore [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64SETEQmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
// cond: !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(!config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
// cond: !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(!config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
// cond: !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(!config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
// cond: !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(!config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
// cond: isUint32PowerOfTwo(c) && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(isUint32PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
// cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(isUint64PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
// cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(isUint64PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
// cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(isUint64PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
+ // match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
// cond:
- // result: (SETNEmem [off] {sym} ptr (CMPLconst [0] s) mem)
+ // result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
for {
off := v.AuxInt
sym := v.Aux
break
}
mem := v.Args[2]
- v.reset(OpAMD64SETNEmem)
+ v.reset(OpAMD64SETNEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
+ // match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
// cond:
- // result: (SETNEmem [off] {sym} ptr (CMPQconst [0] s) mem)
+ // result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
for {
off := v.AuxInt
sym := v.Aux
break
}
mem := v.Args[2]
- v.reset(OpAMD64SETNEmem)
+ v.reset(OpAMD64SETNEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
return false
}
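// The TEST rules above recognize single-bit tests: TEST against a power of
// two (or against 1 shifted by a variable amount) becomes a BT instruction,
// which copies the selected bit into the carry flag. SETEQ therefore becomes
// SETAE (CF=0, bit clear) and SETNE becomes SETB (CF=1, bit set). The
// power-of-two guard and bit index behave like this sketch (illustrative
// names; log2 of a power of two is its bit position):
func sketchIsUint64PowerOfTwo(c int64) bool {
	u := uint64(c)
	return u != 0 && u&(u-1) == 0
}

func sketchLog2(c int64) (n int64) {
	for u := uint64(c); u > 1; u >>= 1 {
		n++
	}
	return n
}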
-func rewriteValueAMD64_OpAMD64SETEQmem_10(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (SETEQmem [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQconst [63] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQconst [63] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTLconst [31] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTLconst [31] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQconst [0] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQconst [0] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTLconst [0] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTLconst [0] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQconst [63] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQconst [63] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
return false
}
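// The shift-pair patterns above are bit tests in disguise:
// SHLQconst [63] (SHRQconst [63] x) isolates bit 63 of x, and
// SHRQconst [63] (SHLQconst [63] x) isolates bit 0, so TESTing such a value
// against itself reduces to BTQconst [63] or BTQconst [0] respectively. The
// underlying identity, as a runnable check (illustrative):
func sketchSignBitIdentity(x uint64) bool {
	isolated := (x >> 63) << 63 // the inner-SHR-then-SHL pattern
	return (isolated != 0) == (x&(1<<63) != 0) // always true
}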
-func rewriteValueAMD64_OpAMD64SETEQmem_20(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETEQstore_20(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (SETEQmem [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTLconst [31] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem)
+ // match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTLconst [31] x) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETAEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETEQmem [off] {sym} ptr x mem)
+ // result: (SETEQstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETEQmem)
+ v.reset(OpAMD64SETEQstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETEQmem [off1+off2] {sym} base val mem)
+ // result: (SETEQstore [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETEQmem)
+ v.reset(OpAMD64SETEQstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETEQmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETEQmem)
+ v.reset(OpAMD64SETEQstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETEQstore [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETEQstore [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETEQstore [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETEQstore [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETEQstore [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
}
return false
}
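// rulegen appears to split large rewrite functions into chunks of ten rules
// -- hence the _0, _10, _20 suffixes, where the number is the index of the
// chunk's first rule -- and the top-level dispatcher tries each chunk in
// order until one fires. A sketch of the dispatch shape (the generated
// switch lives near the top of this file):
//
//	case OpAMD64SETEQstore:
//		return rewriteValueAMD64_OpAMD64SETEQstore_0(v) ||
//			rewriteValueAMD64_OpAMD64SETEQstore_10(v) ||
//			rewriteValueAMD64_OpAMD64SETEQstore_20(v)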
-func rewriteValueAMD64_OpAMD64SETGEmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETGEmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETLEmem [off] {sym} ptr x mem)
+ // result: (SETLEstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETLEmem)
+ v.reset(OpAMD64SETLEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETGEmem [off1+off2] {sym} base val mem)
+ // result: (SETGEstore [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETGEmem)
+ v.reset(OpAMD64SETGEstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETGEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETGEmem)
+ v.reset(OpAMD64SETGEstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETGEstore [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETGEstore [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETGEstore [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETGEstore [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETGEstore [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64SETGmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETGmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETLmem [off] {sym} ptr x mem)
+ // result: (SETLstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETLmem)
+ v.reset(OpAMD64SETLstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETGmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETGmem [off1+off2] {sym} base val mem)
+ // result: (SETGstore [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETGmem)
+ v.reset(OpAMD64SETGstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETGmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETGmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETGmem)
+ v.reset(OpAMD64SETGstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETGmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETGstore [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETGmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETGstore [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETGmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETGstore [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETGmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETGstore [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETGmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETGstore [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64SETLEmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETLEmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETGEmem [off] {sym} ptr x mem)
+ // result: (SETGEstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETGEmem)
+ v.reset(OpAMD64SETGEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETLEmem [off1+off2] {sym} base val mem)
+ // result: (SETLEstore [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETLEmem)
+ v.reset(OpAMD64SETLEstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETLEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETLEmem)
+ v.reset(OpAMD64SETLEstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETLEstore [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETLEstore [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETLEstore [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETLEstore [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETLEstore [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64SETLmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETLmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETLstore [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETGmem [off] {sym} ptr x mem)
+ // result: (SETGstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETGmem)
+ v.reset(OpAMD64SETGstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETLmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETLmem [off1+off2] {sym} base val mem)
+ // result: (SETLstore [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETLmem)
+ v.reset(OpAMD64SETLstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETLmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETLmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETLmem)
+ v.reset(OpAMD64SETLstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETLmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETLstore [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETLmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETLstore [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETLmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETLstore [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETLmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETLstore [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETLmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETLstore [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64SETNEmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
// cond: !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTL x y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTL x y) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(!config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
// cond: !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTL x y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTL x y) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(!config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
// cond: !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(!config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
// cond: !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(!config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
// cond: isUint32PowerOfTwo(c) && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(isUint32PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
// cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(isUint64PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
// cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(isUint64PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
// cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(isUint64PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
+ // match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
// cond:
- // result: (SETEQmem [off] {sym} ptr (CMPLconst [0] s) mem)
+ // result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
for {
off := v.AuxInt
sym := v.Aux
break
}
mem := v.Args[2]
- v.reset(OpAMD64SETEQmem)
+ v.reset(OpAMD64SETEQstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
+ // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
// cond:
- // result: (SETEQmem [off] {sym} ptr (CMPQconst [0] s) mem)
+ // result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
for {
off := v.AuxInt
sym := v.Aux
break
}
mem := v.Args[2]
- v.reset(OpAMD64SETEQmem)
+ v.reset(OpAMD64SETEQstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
return false
}
-func rewriteValueAMD64_OpAMD64SETNEmem_10(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (SETNEmem [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQconst [63] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQconst [63] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTLconst [31] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTLconst [31] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQconst [0] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQconst [0] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTLconst [0] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTLconst [0] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQconst [63] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQconst [63] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
}
return false
}
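
A minimal Go sketch of source that plausibly exercises the sign-bit rules above (function and variable names are illustrative, not from this CL): storing an unsigned sign-bit test yields SETNE over TESTQ z z with z = SHRQconst [63] x, and once the SETNE merges into the byte store as SETNEstore, the rule rewrites it to SETBstore of BTQconst [63] x.

package main

// Illustrative trigger for the SETNEstore -> SETBstore rewrite:
// uint64(x)>>63 != 0 tests the sign bit, and the boolean result
// is stored directly to memory.
func storeSignBit(dst *bool, x uint64) {
	*dst = x>>63 != 0
}

func main() {
	var b bool
	storeSignBit(&b, 1<<63)
	println(b) // true
}
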
-func rewriteValueAMD64_OpAMD64SETNEmem_20(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETNEstore_20(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
- // match: (SETNEmem [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTLconst [31] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem)
+ // match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem)
// cond: z1==z2 && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTLconst [31] x) mem)
+ // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
for {
off := v.AuxInt
sym := v.Aux
if !(z1 == z2 && !config.nacl) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETNEmem [off] {sym} ptr x mem)
+ // result: (SETNEstore [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETNEmem)
+ v.reset(OpAMD64SETNEstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETNEmem [off1+off2] {sym} base val mem)
+ // result: (SETNEstore [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETNEmem)
+ v.reset(OpAMD64SETNEstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETNEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETNEmem)
+ v.reset(OpAMD64SETNEstore)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETNEstore [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETNEstore [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETNEstore [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETNEstore [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETNEstore [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
}
// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (SUBLmem x [off] {sym} ptr mem)
+ // result: (SUBLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64SUBLmem)
+ v.reset(OpAMD64SUBLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
return true
}
}
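
A hedged sketch of the load folding handled here (names are illustrative): the load of *p has a single use, so canMergeLoad and clobber permit merging it into the subtraction as the newly named SUBLload.

package sketch

// Sketch: SUBL x (MOVLload [off] {sym} ptr mem) with a single-use
// load becomes SUBLload x [off] {sym} ptr mem (formerly SUBLmem).
func subFromMem(x int32, p *int32) int32 {
	return x - *p
}
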
-func rewriteValueAMD64_OpAMD64SUBLmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SUBLload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (SUBLmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (SUBLmem [off1+off2] {sym} val base mem)
+ // result: (SUBLload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SUBLmem)
+ v.reset(OpAMD64SUBLload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SUBLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SUBLmem)
+ v.reset(OpAMD64SUBLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond:
// result: (SUBL x (MOVLf2i y))
for {
}
// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (SUBQmem x [off] {sym} ptr mem)
+ // result: (SUBQload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64SUBQmem)
+ v.reset(OpAMD64SUBQload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
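
The SUBQload rules that follow fold constant address arithmetic (ADDQconst, LEAQ) into the op's AuxInt offset, guarded by is32Bit. A hypothetical trigger: a fixed-index element access, whose +32 byte offset is expected to end up merged into the fused op's [off].

package sketch

// Sketch: &p[4] is base+32; the constant offset folds into the load,
// and from there into SUBQload's offset once the load merges into
// the subtraction.
func subElem(x int64, p *[8]int64) int64 {
	return x - p[4]
}
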
-func rewriteValueAMD64_OpAMD64SUBQmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SUBQload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (SUBQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (SUBQmem [off1+off2] {sym} val base mem)
+ // result: (SUBQload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SUBQmem)
+ v.reset(OpAMD64SUBQload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SUBQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SUBQmem)
+ v.reset(OpAMD64SUBQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond:
// result: (SUBQ x (MOVQf2i y))
for {
func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool {
// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (SUBSDmem x [off] {sym} ptr mem)
+ // result: (SUBSDload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64SUBSDmem)
+ v.reset(OpAMD64SUBSDload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
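
Besides offset folding, SUBSDload below also carries a load-after-store rule: when the folded load reads a slot just written by an integer MOVQstore, the stored value is forwarded through MOVQi2f rather than bounced through memory. An unverified but plausible trigger is math.Float64frombits, which reinterprets an integer as a float via a spill slot.

package sketch

import "math"

// Sketch: y is stored as an integer and reloaded as a float; if that
// reload folds into the subtraction, SUBSDload x ... (MOVQstore ... y)
// rewrites to SUBSD x (MOVQi2f y) per the rule below.
func subBits(x float64, y uint64) float64 {
	return x - math.Float64frombits(y)
}
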
-func rewriteValueAMD64_OpAMD64SUBSDmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SUBSDload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (SUBSDmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (SUBSDmem [off1+off2] {sym} val base mem)
+ // result: (SUBSDload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SUBSDmem)
+ v.reset(OpAMD64SUBSDload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SUBSDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SUBSDmem)
+ v.reset(OpAMD64SUBSDload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
+ // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// cond:
// result: (SUBSD x (MOVQi2f y))
for {
func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool {
// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (SUBSSmem x [off] {sym} ptr mem)
+ // result: (SUBSSload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64SUBSSmem)
+ v.reset(OpAMD64SUBSSload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SUBSSmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SUBSSload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (SUBSSmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (SUBSSmem [off1+off2] {sym} val base mem)
+ // result: (SUBSSload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SUBSSmem)
+ v.reset(OpAMD64SUBSSload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SUBSSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SUBSSmem)
+ v.reset(OpAMD64SUBSSload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
+ // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// cond:
// result: (SUBSS x (MOVLi2f y))
for {
}
// match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPBconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstload, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(0, off)
}
// match: (TESTB l2 l:(MOVBload {sym} [off] ptr mem))
// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPBconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem)
for {
_ = v.Args[1]
l2 := v.Args[0]
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstload, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(0, off)
}
// match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPLconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstload, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(0, off)
}
// match: (TESTL l2 l:(MOVLload {sym} [off] ptr mem))
// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPLconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem)
for {
_ = v.Args[1]
l2 := v.Args[0]
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstload, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(0, off)
}
// match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPQconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstload, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(0, off)
}
// match: (TESTQ l2 l:(MOVQload {sym} [off] ptr mem))
// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPQconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem)
for {
_ = v.Args[1]
l2 := v.Args[0]
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstload, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(0, off)
}
// match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPWconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstload, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(0, off)
}
// match: (TESTW l2 l:(MOVWload {sym} [off] ptr mem))
// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPWconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem)
for {
_ = v.Args[1]
l2 := v.Args[0]
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstload, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(0, off)
}
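
The TEST rewrites above fold a doubly-used load into an in-memory compare against zero. A hedged sketch of source that could produce the TESTB l l shape, where the same load feeds both operands (hence l.Uses == 2):

package sketch

// Sketch: *p is loaded once and tested against itself; the rewrite
// replaces the load+TESTB pair with CMPBconstload {sym}
// [makeValAndOff(0,off)] ptr mem, comparing the byte in place.
func isZero(p *byte) bool {
	return *p == 0
}
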
// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (XORLmem x [off] {sym} ptr mem)
+ // result: (XORLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64XORLmem)
+ v.reset(OpAMD64XORLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
// match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (XORLmem x [off] {sym} ptr mem)
+ // result: (XORLload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64XORLmem)
+ v.reset(OpAMD64XORLload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
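
The same single-use-load merge applies to XOR; a sketch with illustrative names:

package sketch

// Sketch: XORL x (MOVLload ...) with a single-use load becomes
// XORLload x [off] {sym} ptr mem (formerly XORLmem).
func xorFromMem(x uint32, p *uint32) uint32 {
	return x ^ *p
}
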
-func rewriteValueAMD64_OpAMD64XORLmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64XORLload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (XORLmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (XORLmem [off1+off2] {sym} val base mem)
+ // result: (XORLload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64XORLmem)
+ v.reset(OpAMD64XORLload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (XORLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (XORLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64XORLmem)
+ v.reset(OpAMD64XORLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond:
// result: (XORL x (MOVLf2i y))
for {
}
// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (XORQmem x [off] {sym} ptr mem)
+ // result: (XORQload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64XORQmem)
+ v.reset(OpAMD64XORQload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
func rewriteValueAMD64_OpAMD64XORQ_10(v *Value) bool {
// match: (XORQ l:(MOVQload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (XORQmem x [off] {sym} ptr mem)
+ // result: (XORQload x [off] {sym} ptr mem)
for {
_ = v.Args[1]
l := v.Args[0]
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64XORQmem)
+ v.reset(OpAMD64XORQload)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValueAMD64_OpAMD64XORQmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64XORQload_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (XORQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (XORQmem [off1+off2] {sym} val base mem)
+ // result: (XORQload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64XORQmem)
+ v.reset(OpAMD64XORQload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (XORQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (XORQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64XORQmem)
+ v.reset(OpAMD64XORQload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond:
// result: (XORQ x (MOVQf2i y))
for {