return true
}
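Note: every hunk in this diff is the same mechanical conversion: raw v.AuxInt (int64) and v.Aux (interface{}) reads and writes are replaced with typed accessor calls, so each opcode's aux fields flow through their declared types. As a sketch (assuming the cmd/compile/internal/ssa/rewrite.go helpers of this era; the bodies below are recalled for context, not part of this diff), the accessors are thin conversions:

func auxIntToInt32(i int64) int32         { return int32(i) }
func int32ToAuxInt(i int32) int64         { return int64(i) }
func auxIntToValAndOff(i int64) ValAndOff { return ValAndOff(i) }
func valAndOffToAuxInt(v ValAndOff) int64 { return int64(v) }
func auxToSym(i interface{}) Sym          { s, _ := i.(Sym); return s }
func symToAux(s Sym) interface{}          { return s }

mergeSymTyped is the transitional, typed counterpart of mergeSym; it merges a symbol pair in which at most one entry is non-nil.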
// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
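Note: the *constmodify and *constload opcodes pack two quantities into AuxInt: an immediate value in the high 32 bits and an address offset in the low 32, via the ValAndOff type. The untyped canAdd/add pair took an int64 offset; the typed canAdd32/addOffset32 pair takes the LEAQ's int32 offset directly and refuses the fold unless the summed offset still fits in 32 bits. Roughly, assuming the op.go definitions of this era:

type ValAndOff int64

func (x ValAndOff) Off() int64 { return int64(int32(x)) }
func (x ValAndOff) canAdd32(off int32) bool {
	newoff := x.Off() + int64(off)
	return newoff == int64(int32(newoff))
}

addOffset32 then repacks the ValAndOff with the summed offset.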
// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDQconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDQload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDQload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDQmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDSDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDSSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ANDLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ANDLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ANDLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ANDQconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDQload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ANDQload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ANDQmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64BTCLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (BTCLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64BTCLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64BTCQconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (BTCQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64BTCQmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64BTRLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (BTRLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64BTRLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64BTRQconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (BTRQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64BTRQmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64BTSLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (BTSLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64BTSLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (BTSQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64BTSQconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (BTSQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64BTSQmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64CMPBconstload)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPBload [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64CMPBload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64CMPLconstload)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPLload [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64CMPLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64CMPQconstload)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPQload [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64CMPQload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64CMPWconstload)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPWload [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64CMPWload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (DIVSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64DIVSDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (DIVSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64DIVSSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
break
}
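Note: the recurring cond rewrite from is32Bit(off1+off2) to is32Bit(int64(off1)+int64(off2)) is the semantic point of this conversion for the plain-offset ops. With off1 and off2 now typed int32, the sum must be widened before the range check, or the addition itself could wrap. The two guards are, roughly:

func is32Bit(n int64) bool              { return n == int64(int32(n)) }
func canMergeSym(x, y interface{}) bool { return x == nil || y == nil }

canMergeSym holding means at most one of the two symbols is set, which is what makes folding the LEAQ's symbol into the outer op's aux sound. The LEAQ rules that begin here apply the same fold to address computations themselves.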
// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ [off1+off2] {mergeSymTyped(sym1,sym2)} x)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
x := v_0.Args[0]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg(x)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ1 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
y := v_0.Args[1]
x := v_0.Args[0]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ2 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
y := v_0.Args[1]
x := v_0.Args[0]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ2)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ4 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
y := v_0.Args[1]
x := v_0.Args[0]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ8 {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
y := v_0.Args[1]
x := v_0.Args[0]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64LEAQ8)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
break
}
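Note: the x.Op != OpSB guards in the scaled-LEAQ rules below keep the static-base pseudo-register out of the index slot; SB is only addressable as a base, so a fold that would move it into the index position is rejected.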
// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpAMD64LEAQ {
continue
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
x := v_0.Args[0]
y := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
continue
}
v.reset(OpAMD64LEAQ1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
break
}
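Note: LEAQ1 is commutative in its two address components, so rulegen wraps the match body in the for _i0 loop seen above, trying both argument orders by swapping v_0 and v_1 on the second pass; failed sub-matches continue to the next order rather than break, and the break after the loop falls through to the next rule once both orders are exhausted.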
// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSymTyped(sym1, sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1.Op != OpAMD64LEAQ1 {
continue
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
y := v_1.Args[1]
- if y != v_1.Args[0] || !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
continue
}
v.reset(OpAMD64LEAQ2)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
break
}
// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSymTyped(sym1, sym2)} y x)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1.Op != OpAMD64LEAQ1 {
continue
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
v_1_1 := v_1.Args[1]
continue
}
y := v_1_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
continue
}
v.reset(OpAMD64LEAQ2)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(y, x)
return true
}
return true
}
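Note: when a commutative match nests another commutative op, as in (LEAQ1 x (LEAQ1 x y)) above, the generated matcher adds an inner for _i1 loop over v_1_0 and v_1_1 inside the _i0 loop. The dangling continue/} context lines in that hunk belong to the inner loop's if x != v_1_0 check; the loop header itself sits in the unchanged region between the two hunks shown.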
// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
x := v_0.Args[0]
y := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ2)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
- // cond: is32Bit(off1+2*off2) && sym2 == nil
+ // cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
// result: (LEAQ4 [off1+2*off2] {sym1} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
x := v_0
if v_1.Op != OpAMD64LEAQ1 {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
y := v_1.Args[1]
- if y != v_1.Args[0] || !(is32Bit(off1+2*off2) && sym2 == nil) {
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
break
}
v.reset(OpAMD64LEAQ4)
- v.AuxInt = off1 + 2*off2
- v.Aux = sym1
+ v.AuxInt = int32ToAuxInt(off1 + 2*off2)
+ v.Aux = symToAux(sym1)
v.AddArg2(x, y)
return true
}
// match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
- // cond: is32Bit(off+scale*2)
- // result: (LEAQ [off+scale*2] {sym} x)
+ // cond: is32Bit(int64(off)+int64(scale)*2)
+ // result: (LEAQ [off+int32(scale)*2] {sym} x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if v_1.Op != OpAMD64MOVQconst {
break
}
- scale := v_1.AuxInt
- if !(is32Bit(off + scale*2)) {
+ scale := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*2)) {
break
}
v.reset(OpAMD64LEAQ)
- v.AuxInt = off + scale*2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
+ v.Aux = symToAux(sym)
v.AddArg(x)
return true
}
// match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
- // cond: is32Bit(off+scale*2)
- // result: (LEAQ [off+scale*2] {sym} x)
+ // cond: is32Bit(int64(off)+int64(scale)*2)
+ // result: (LEAQ [off+int32(scale)*2] {sym} x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if v_1.Op != OpAMD64MOVLconst {
break
}
- scale := v_1.AuxInt
- if !(is32Bit(off + scale*2)) {
+ scale := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*2)) {
break
}
v.reset(OpAMD64LEAQ)
- v.AuxInt = off + scale*2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
+ v.Aux = symToAux(sym)
v.AddArg(x)
return true
}
return true
}
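Note: LEAQ2 [off] {sym} x idx computes x + 2*idx + off, so a constant index folds into the displacement as off + 2*scale, turning the instruction into a plain LEAQ. The typed cond does the whole sum in int64 and only narrows with int32(scale) in the result, after is32Bit has proven the fit; the LEAQ4 and LEAQ8 rules that follow are the same fold with factors 4 and 8.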
// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
x := v_0.Args[0]
y := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
- // cond: is32Bit(off1+4*off2) && sym2 == nil
+ // cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
// result: (LEAQ8 [off1+4*off2] {sym1} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
x := v_0
if v_1.Op != OpAMD64LEAQ1 {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
y := v_1.Args[1]
- if y != v_1.Args[0] || !(is32Bit(off1+4*off2) && sym2 == nil) {
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
break
}
v.reset(OpAMD64LEAQ8)
- v.AuxInt = off1 + 4*off2
- v.Aux = sym1
+ v.AuxInt = int32ToAuxInt(off1 + 4*off2)
+ v.Aux = symToAux(sym1)
v.AddArg2(x, y)
return true
}
// match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
- // cond: is32Bit(off+scale*4)
- // result: (LEAQ [off+scale*4] {sym} x)
+ // cond: is32Bit(int64(off)+int64(scale)*4)
+ // result: (LEAQ [off+int32(scale)*4] {sym} x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if v_1.Op != OpAMD64MOVQconst {
break
}
- scale := v_1.AuxInt
- if !(is32Bit(off + scale*4)) {
+ scale := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*4)) {
break
}
v.reset(OpAMD64LEAQ)
- v.AuxInt = off + scale*4
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
+ v.Aux = symToAux(sym)
v.AddArg(x)
return true
}
// match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
- // cond: is32Bit(off+scale*4)
- // result: (LEAQ [off+scale*4] {sym} x)
+ // cond: is32Bit(int64(off)+int64(scale)*4)
+ // result: (LEAQ [off+int32(scale)*4] {sym} x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if v_1.Op != OpAMD64MOVLconst {
break
}
- scale := v_1.AuxInt
- if !(is32Bit(off + scale*4)) {
+ scale := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*4)) {
break
}
v.reset(OpAMD64LEAQ)
- v.AuxInt = off + scale*4
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
+ v.Aux = symToAux(sym)
v.AddArg(x)
return true
}
return true
}
// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
x := v_0.Args[0]
y := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
v.reset(OpAMD64LEAQ8)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
- // cond: is32Bit(off+scale*8)
- // result: (LEAQ [off+scale*8] {sym} x)
+ // cond: is32Bit(int64(off)+int64(scale)*8)
+ // result: (LEAQ [off+int32(scale)*8] {sym} x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if v_1.Op != OpAMD64MOVQconst {
break
}
- scale := v_1.AuxInt
- if !(is32Bit(off + scale*8)) {
+ scale := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*8)) {
break
}
v.reset(OpAMD64LEAQ)
- v.AuxInt = off + scale*8
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
+ v.Aux = symToAux(sym)
v.AddArg(x)
return true
}
// match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
- // cond: is32Bit(off+scale*8)
- // result: (LEAQ [off+scale*8] {sym} x)
+ // cond: is32Bit(int64(off)+int64(scale)*8)
+ // result: (LEAQ [off+int32(scale)*8] {sym} x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if v_1.Op != OpAMD64MOVLconst {
break
}
- scale := v_1.AuxInt
- if !(is32Bit(off + scale*8)) {
+ scale := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*8)) {
break
}
v.reset(OpAMD64LEAQ)
- v.AuxInt = off + scale*8
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
+ v.Aux = symToAux(sym)
v.AddArg(x)
return true
}
return true
}
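Note: from here the diff moves into the load/store opcodes, and the conversion is unchanged in shape: whatever the access width or extension (MOVBQSXload sign-extends a byte, MOVBload does not), the offset aux is an int32 address displacement, so the LEAQ fold is the same is32Bit/canMergeSym pattern, with only the argument count differing between loads (AddArg2) and stores (AddArg3).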
// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBQSXload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBQSXload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
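Note: the MOV*storeconst opcodes store an immediate, so like the constmodify ops they carry a ValAndOff (here named sc) in AuxInt, and folding the LEAQ goes through canAdd32/addOffset32 on the packed offset rather than is32Bit on a bare displacement.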
// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
- // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
- // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- sc := v.AuxInt
- sym1 := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off := v_0.AuxInt
- sym2 := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
break
}
v.reset(OpAMD64MOVBstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return true
}
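// The storeconst rules pack the constant value and the offset into a
// single ValAndOff auxiliary, so folding in a LEAQ offset means repacking
// rather than plain addition; canAdd32/addOffset32 replace the untyped
// canAdd/add. A rough sketch of the packing, assuming the value sits in
// the high 32 bits and the offset in the low 32 bits:
//
//	type ValAndOff int64
//
//	func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
//	func (x ValAndOff) Off() int64 { return int64(int32(x)) }
//
//	// canAdd32 reports whether Off()+off still fits in an int32,
//	// i.e. whether addOffset32 can repack the pair without
//	// truncating the new offset.
//	func (x ValAndOff) canAdd32(off int32) bool {
//		newoff := x.Off() + int64(off)
//		return newoff == int64(int32(newoff))
//	}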
// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLQSXload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLQSXload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVLstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
- // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
- // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- sc := v.AuxInt
- sym1 := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off := v_0.AuxInt
- sym2 := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
break
}
v.reset(OpAMD64MOVLstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return true
}
// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVOload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVOload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVOstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVOstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVQload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVQload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVQstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
- // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
- // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- sc := v.AuxInt
- sym1 := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off := v_0.AuxInt
- sym2 := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
break
}
v.reset(OpAMD64MOVQstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return true
}
// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSDstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSSload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSSstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVSSstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWQSXload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWQSXload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
- // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
- // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
for {
- sc := v.AuxInt
- sym1 := v.Aux
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off := v_0.AuxInt
- sym2 := v_0.Aux
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
break
}
v.reset(OpAMD64MOVWstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return true
}
// match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MULSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MULSDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MULSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MULSSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ORLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ORLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ORLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ORQconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORQload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ORQload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ORQmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETAEstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SETAEstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETAstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SETAstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETBEstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SETBEstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SETBstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETEQstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SETEQstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETGEstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SETGEstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETGstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SETGstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETLEstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SETLEstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETLstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SETLstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETNEstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SETNEstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SUBLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SUBLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBQload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SUBQload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SUBQmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SUBSDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SUBSSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64XORLconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64XORLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64XORLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
return true
}
// match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
- // result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
mem := v_1
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64XORQconstmodify)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(base, mem)
return true
}
return true
}
// match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORQload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
val := v_0
if v_1.Op != OpAMD64LEAQ {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
base := v_1.Args[0]
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64XORQload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
return true
}
// match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64XORQmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
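// Every hunk above applies the same mechanical conversion: untyped
// v.AuxInt / v.Aux reads and writes become typed accessors, so each rule
// body states its operand types explicitly and the int64<->int32
// conversions happen at a few well-defined choke points. A minimal sketch
// of such helpers, assuming definitions along the lines of
// cmd/compile/internal/ssa/rewrite.go:
//
//	func auxIntToInt32(i int64) int32         { return int32(i) }
//	func int32ToAuxInt(i int32) int64         { return int64(i) }
//	func auxIntToValAndOff(i int64) ValAndOff { return ValAndOff(i) }
//	func valAndOffToAuxInt(v ValAndOff) int64 { return int64(v) }
//
// The rewritten rules are behavior-preserving; only where the
// conversions happen changes.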