((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
// Fold constants and {sym} offsets into loads used by compare ops.
// CMP*load carries a plain offset in AuxInt, so plain is32Bit addition applies.
(CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
	(CMP(Q|L|W|B)load [off1+off2] {sym} base val mem)
// CMP*constload packs {value, offset} into AuxInt as a ValAndOff, so the
// offset half must be adjusted with ValAndOff.add, guarded by canAdd.
(CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
	(CMP(Q|L|W|B)constload [ValAndOff(valoff1).add(off2)] {sym} base mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
	((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
(CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
	(CMP(Q|L|W|B)constload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
return false
}
func rewriteValueAMD64_OpAMD64CMPBconstload_0(v *Value) bool {
- // match: (CMPBconstload [off1] {sym} (ADDQconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (CMPBconstload [off1+off2] {sym} base mem)
+ // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
for {
- off1 := v.AuxInt
+ valoff1 := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
off2 := v_0.AuxInt
base := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1 + off2)) {
+ if !(ValAndOff(valoff1).canAdd(off2)) {
break
}
v.reset(OpAMD64CMPBconstload)
- v.AuxInt = off1 + off2
+ v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (CMPBconstload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (CMPBconstload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
+ valoff1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64CMPBconstload)
- v.AuxInt = off1 + off2
+ v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return false
}
func rewriteValueAMD64_OpAMD64CMPLconstload_0(v *Value) bool {
- // match: (CMPLconstload [off1] {sym} (ADDQconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (CMPLconstload [off1+off2] {sym} base mem)
+ // match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
for {
- off1 := v.AuxInt
+ valoff1 := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
off2 := v_0.AuxInt
base := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1 + off2)) {
+ if !(ValAndOff(valoff1).canAdd(off2)) {
break
}
v.reset(OpAMD64CMPLconstload)
- v.AuxInt = off1 + off2
+ v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (CMPLconstload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (CMPLconstload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
+ valoff1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64CMPLconstload)
- v.AuxInt = off1 + off2
+ v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return false
}
func rewriteValueAMD64_OpAMD64CMPQconstload_0(v *Value) bool {
- // match: (CMPQconstload [off1] {sym} (ADDQconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (CMPQconstload [off1+off2] {sym} base mem)
+ // match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
for {
- off1 := v.AuxInt
+ valoff1 := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
off2 := v_0.AuxInt
base := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1 + off2)) {
+ if !(ValAndOff(valoff1).canAdd(off2)) {
break
}
v.reset(OpAMD64CMPQconstload)
- v.AuxInt = off1 + off2
+ v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (CMPQconstload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (CMPQconstload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
+ valoff1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64CMPQconstload)
- v.AuxInt = off1 + off2
+ v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return false
}
func rewriteValueAMD64_OpAMD64CMPWconstload_0(v *Value) bool {
- // match: (CMPWconstload [off1] {sym} (ADDQconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (CMPWconstload [off1+off2] {sym} base mem)
+ // match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
for {
- off1 := v.AuxInt
+ valoff1 := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
off2 := v_0.AuxInt
base := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1 + off2)) {
+ if !(ValAndOff(valoff1).canAdd(off2)) {
break
}
v.reset(OpAMD64CMPWconstload)
- v.AuxInt = off1 + off2
+ v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (CMPWconstload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (CMPWconstload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
+ valoff1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64CMPWconstload)
- v.AuxInt = off1 + off2
+ v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)