_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (ADDLconstmem [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (ADDLconstmem [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLconstmem)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
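+ // Note: ValAndOff packs a 32-bit constant value and a 32-bit offset into the
+ // 64-bit AuxInt. canAdd(off2) checks that the summed offset still fits in 32
+ // bits, and add(off2) re-packs the same value with the larger offset. For
+ // example (illustrative numbers only), val=1 off=8 folded with ADDQconst [16]
+ // becomes val=1 off=24, addressed directly off base.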
+ // match: (ADDLconstmem [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (ADDLconstmem [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLconstmem)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
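+ // The LEAQ variant additionally folds the symbol: canMergeSym(sym1, sym2)
+ // holds when at most one of the two symbols is non-nil, and mergeSym keeps
+ // whichever one is set, so the rewrite never drops a symbolic base.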
// match: (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _))
// cond:
// result: (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (ADDLmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ADDLmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
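+ // For the three-operand mem ops the value operand stays in Args[0]; only the
+ // address in Args[1] is rewritten, replacing (ADDQconst [off2] base) with base
+ // and adding off2 into the instruction's own offset. is32Bit guards that the
+ // combined displacement still fits in a signed 32-bit field.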
+ // match: (ADDLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (ADDLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
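+ // The same ADDQconst/LEAQ folding pair is generated below for the remaining
+ // two-operand mem ops in this section (ADDQmem, ADDSS/ADDSDmem, ANDL/ANDQmem,
+ // MULSS/MULSDmem, ORL/ORQmem, SUBL/SUBQ/SUBSS/SUBSDmem, XORL/XORQmem), and in
+ // ValAndOff form for ADDQconstmem.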
// match: (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond:
// result: (ADDL x (MOVLf2i y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (ADDQconstmem [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (ADDQconstmem [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconstmem)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ADDQconstmem [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (ADDQconstmem [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconstmem)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
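+ // ADDQconstmem gets the same ValAndOff-based folding as ADDLconstmem above,
+ // only with the 64-bit opcode.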
// match: (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _))
// cond:
// result: (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (ADDQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ADDQmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ADDQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (ADDQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond:
// result: (ADDQ x (MOVQf2i y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (ADDSDmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ADDSDmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64ADDSDmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ADDSDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (ADDSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDSDmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
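+ // The SSE variants target float load-op patterns at constant offsets, e.g.
+ // code along the lines of s.f += x where f sits at a fixed offset within s.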
// match: (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// cond:
// result: (ADDSD x (MOVQi2f y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (ADDSSmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ADDSSmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64ADDSSmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ADDSSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (ADDSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDSSmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// cond:
// result: (ADDSS x (MOVLi2f y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (ANDLmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ANDLmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ANDLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (ANDLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond:
// result: (ANDL x (MOVLf2i y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (ANDQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ANDQmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ANDQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (ANDQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond:
// result: (ANDQ x (MOVQf2i y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (MULSDmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MULSDmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MULSDmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MULSDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MULSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MULSDmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// cond:
// result: (MULSD x (MOVQi2f y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (MULSSmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MULSSmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MULSSmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MULSSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MULSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MULSSmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// cond:
// result: (MULSS x (MOVLi2f y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (ORLmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ORLmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64ORLmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ORLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (ORLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORLmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond:
// result: ( ORL x (MOVLf2i y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (ORQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ORQmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64ORQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ORQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (ORQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond:
// result: ( ORQ x (MOVQf2i y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (SUBLmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (SUBLmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64SUBLmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SUBLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (SUBLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBLmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond:
// result: (SUBL x (MOVLf2i y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (SUBQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (SUBQmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64SUBQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SUBQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (SUBQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond:
// result: (SUBQ x (MOVQf2i y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (SUBSDmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (SUBSDmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64SUBSDmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SUBSDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (SUBSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBSDmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// cond:
// result: (SUBSD x (MOVQi2f y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (SUBSSmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (SUBSSmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64SUBSSmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SUBSSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (SUBSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBSSmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// cond:
// result: (SUBSS x (MOVLi2f y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (XORLmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (XORLmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64XORLmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (XORLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORLmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond:
// result: (XORL x (MOVLf2i y))
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (XORQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (XORQmem [off1+off2] {sym} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64XORQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (XORQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
// match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond:
// result: (XORQ x (MOVQf2i y))