return rewriteValueAMD64_OpAMD64ANDQmodify_0(v)
case OpAMD64BSFQ:
return rewriteValueAMD64_OpAMD64BSFQ_0(v)
+ case OpAMD64BTCLconst:
+ return rewriteValueAMD64_OpAMD64BTCLconst_0(v)
+ case OpAMD64BTCLconstmodify:
+ return rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v)
+ case OpAMD64BTCLmodify:
+ return rewriteValueAMD64_OpAMD64BTCLmodify_0(v)
+ case OpAMD64BTCQconst:
+ return rewriteValueAMD64_OpAMD64BTCQconst_0(v)
+ case OpAMD64BTCQconstmodify:
+ return rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v)
+ case OpAMD64BTCQmodify:
+ return rewriteValueAMD64_OpAMD64BTCQmodify_0(v)
case OpAMD64BTLconst:
return rewriteValueAMD64_OpAMD64BTLconst_0(v)
case OpAMD64BTQconst:
return rewriteValueAMD64_OpAMD64BTQconst_0(v)
case OpAMD64BTRLconst:
return rewriteValueAMD64_OpAMD64BTRLconst_0(v)
+ case OpAMD64BTRLconstmodify:
+ return rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v)
+ case OpAMD64BTRLmodify:
+ return rewriteValueAMD64_OpAMD64BTRLmodify_0(v)
case OpAMD64BTRQconst:
return rewriteValueAMD64_OpAMD64BTRQconst_0(v)
+ case OpAMD64BTRQconstmodify:
+ return rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v)
+ case OpAMD64BTRQmodify:
+ return rewriteValueAMD64_OpAMD64BTRQmodify_0(v)
case OpAMD64BTSLconst:
return rewriteValueAMD64_OpAMD64BTSLconst_0(v)
+ case OpAMD64BTSLconstmodify:
+ return rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v)
+ case OpAMD64BTSLmodify:
+ return rewriteValueAMD64_OpAMD64BTSLmodify_0(v)
case OpAMD64BTSQconst:
return rewriteValueAMD64_OpAMD64BTSQconst_0(v)
+ case OpAMD64BTSQconstmodify:
+ return rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v)
+ case OpAMD64BTSQmodify:
+ return rewriteValueAMD64_OpAMD64BTSQmodify_0(v)
case OpAMD64CMOVLCC:
return rewriteValueAMD64_OpAMD64CMOVLCC_0(v)
case OpAMD64CMOVLCS:
case OpAMD64MOVQloadidx8:
return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
case OpAMD64MOVQstore:
- return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) || rewriteValueAMD64_OpAMD64MOVQstore_20(v)
+ return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) || rewriteValueAMD64_OpAMD64MOVQstore_20(v) || rewriteValueAMD64_OpAMD64MOVQstore_30(v)
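+ // The generated rewriters are split into chunks of ten rules per
+ // function (hence the _0, _10, _20 suffixes), and this dispatch ORs
+ // the chunks together; the new BTC/BTR/BTS store fusions push
+ // MOVQstore into a fourth chunk, so a _30 call is added here.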
case OpAMD64MOVQstoreconst:
return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
case OpAMD64MOVQstoreconstidx1:
v.AddArg(x)
return true
}
+ // match: (ANDLconst [c] (BTRLconst [d] x))
+ // cond:
+ // result: (ANDLconst [c &^ (1<<uint32(d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTRLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = c &^ (1 << uint32(d))
+ v.AddArg(x)
+ return true
+ }
// match: (ANDLconst [ 0xFF] x)
// cond:
// result: (MOVBQZX x)
v.AddArg(x)
return true
}
+ // match: (ANDQconst [c] (BTRQconst [d] x))
+ // cond:
+ // result: (ANDQconst [c &^ (1<<uint32(d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTRQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = c &^ (1 << uint32(d))
+ v.AddArg(x)
+ return true
+ }
// match: (ANDQconst [ 0xFF] x)
// cond:
// result: (MOVBQZX x)
}
return false
}
+func rewriteValueAMD64_OpAMD64BTCLconst_0(v *Value) bool {
+ // match: (BTCLconst [c] (XORLconst [d] x))
+ // cond:
+ // result: (XORLconst [d ^ 1<<uint32(c)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = d ^ 1<<uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCLconst [c] (BTCLconst [d] x))
+ // cond:
+ // result: (XORLconst [1<<uint32(c) ^ 1<<uint32(d)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTCLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
+ v.AddArg(x)
+ return true
+ }
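+ // A worked example of the two folds above (a sketch, not itself a
+ // rule): BTC complements one bit, so with c=1 and d=3,
+ // (BTCLconst [1] (BTCLconst [3] x)) rewrites to
+ // (XORLconst [1<<1 ^ 1<<3] x) = (XORLconst [10] x), one immediate
+ // XOR in place of two single-bit complements.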
+ // match: (BTCLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [d^(1<<uint32(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = d ^ (1 << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v *Value) bool {
+ // match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTCLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
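+ // The ADDQconst fold above and the LEAQ fold below are the usual
+ // addressing-mode absorption: off2 is added into the offset half of
+ // the ValAndOff aux while the bit index in the value half is left
+ // alone, with canAdd guarding 32-bit overflow. A sketch, assuming
+ // the offsets fit: makeValAndOff(5, 8) combined with off2 = 16
+ // becomes makeValAndOff(5, 24).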
+ // match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTCLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCLmodify_0(v *Value) bool {
+ // match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (BTCLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64BTCLmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
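+ // Here only the displacement is folded: amd64 memory operands carry
+ // a signed 32-bit offset, so is32Bit(off1+off2) must hold before the
+ // ADDQconst (or, below, the LEAQ) is swallowed. A sketch: off1 = 100
+ // and off2 = 28 fold to BTCLmodify [128] {sym} base val mem.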
+ // match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTCLmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCQconst_0(v *Value) bool {
+ // match: (BTCQconst [c] (XORQconst [d] x))
+ // cond:
+ // result: (XORQconst [d ^ 1<<uint32(c)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = d ^ 1<<uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCQconst [c] (BTCQconst [d] x))
+ // cond:
+ // result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTCQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [d^(1<<uint32(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = d ^ (1 << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v *Value) bool {
+ // match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTCQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTCQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCQmodify_0(v *Value) bool {
+ // match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (BTCQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64BTCQmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTCQmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64BTLconst_0(v *Value) bool {
// match: (BTLconst [c] (SHRQconst [d] x))
// cond: (c+d)<64
v.AddArg(x)
return true
}
+ // match: (BTRLconst [c] (ANDLconst [d] x))
+ // cond:
+ // result: (ANDLconst [d &^ (1<<uint32(c))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = d &^ (1 << uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRLconst [c] (BTRLconst [d] x))
+ // cond:
+ // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTRLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
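+ // A worked example of the two BTR folds above (a sketch): BTR clears
+ // one bit, so with c=0 and d=4, (BTRLconst [0] (BTRLconst [4] x))
+ // rewrites to (ANDLconst [^(1<<0 | 1<<4)] x), an AND with ^0x11 that
+ // clears both bits at once.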
+ // match: (BTRLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [d&^(1<<uint32(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = d &^ (1 << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v *Value) bool {
+ // match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTRLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTRLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRLmodify_0(v *Value) bool {
+ // match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (BTRLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64BTRLmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTRLmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64BTRQconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (BTRQconst [c] (ANDQconst [d] x))
+ // cond:
+ // result: (ANDQconst [d &^ (1<<uint32(c))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = d &^ (1 << uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (BTRQconst [d] x))
+ // cond:
+ // result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTRQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [d&^(1<<uint32(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = d &^ (1 << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v *Value) bool {
+ // match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTRQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTRQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRQmodify_0(v *Value) bool {
+ // match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (BTRQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64BTRQmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTRQmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64BTSLconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (BTSLconst [c] (ORLconst [d] x))
+ // cond:
+ // result: (ORLconst [d | 1<<uint32(c)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = d | 1<<uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSLconst [c] (BTSLconst [d] x))
+ // cond:
+ // result: (ORLconst [1<<uint32(d) | 1<<uint32(c)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTSLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = 1<<uint32(d) | 1<<uint32(c)
+ v.AddArg(x)
+ return true
+ }
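+ // A worked example of the two BTS folds above (a sketch): BTS sets
+ // one bit, so with c=2 and d=32, (BTSLconst [2] (ORLconst [32] x))
+ // rewrites to (ORLconst [32 | 1<<2] x) = (ORLconst [36] x).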
+ // match: (BTSLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [d|(1<<uint32(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = d | (1 << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v *Value) bool {
+ // match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTSLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTSLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSLmodify_0(v *Value) bool {
+ // match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (BTSLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64BTSLmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTSLmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64BTSQconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (BTSQconst [c] (ORQconst [d] x))
+ // cond:
+ // result: (ORQconst [d | 1<<uint32(c)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = d | 1<<uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (BTSQconst [d] x))
+ // cond:
+ // result: (ORQconst [1<<uint32(d) | 1<<uint32(c)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTSQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = 1<<uint32(d) | 1<<uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [d|(1<<uint32(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = d | (1 << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v *Value) bool {
+ // match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTSQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTSQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSQmodify_0(v *Value) bool {
+ // match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (BTSQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64BTSQmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTSQmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64CMOVLCC_0(v *Value) bool {
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
break
}
- v.reset(OpAMD64ORLmodify)
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORL x l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64ORL {
+ break
+ }
+ _ = y.Args[1]
+ x := y.Args[0]
+ l := y.Args[1]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
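+ // These MOVLstore rules (and the BTx variants below) fuse a
+ // load/op/store triple back into one read-modify-write instruction
+ // when the load and the op have no other uses. A sketch of the shape
+ // being matched:
+ //
+ //	l := *p      // MOVLload [off] {sym} ptr mem
+ //	y := l OP x  // ORL, XORL, BTCL, ...
+ //	*p = y       // MOVLstore  =>  (OPmodify [off] {sym} ptr x mem)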
+ // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64XORL {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ x := y.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORL x l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64XORL {
+ break
+ }
+ _ = y.Args[1]
+ x := y.Args[0]
+ l := y.Args[1]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (BTCLmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64BTCL {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ x := y.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64BTCLmodify)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVLstore {sym} [off] ptr y:(ORL x l:(MOVLload [off] {sym} ptr mem)) mem)
+ // match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
- // result: (ORLmodify [off] {sym} ptr x mem)
+ // result: (BTRLmodify [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != OpAMD64ORL {
+ if y.Op != OpAMD64BTRL {
break
}
_ = y.Args[1]
- x := y.Args[0]
- l := y.Args[1]
+ l := y.Args[0]
if l.Op != OpAMD64MOVLload {
break
}
break
}
mem := l.Args[1]
+ x := y.Args[1]
if mem != v.Args[2] {
break
}
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
break
}
- v.reset(OpAMD64ORLmodify)
+ v.reset(OpAMD64BTRLmodify)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
- // result: (XORLmodify [off] {sym} ptr x mem)
+ // result: (BTSLmodify [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != OpAMD64XORL {
+ if y.Op != OpAMD64BTSL {
break
}
_ = y.Args[1]
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
break
}
- v.reset(OpAMD64XORLmodify)
+ v.reset(OpAMD64BTSLmodify)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVLstore {sym} [off] ptr y:(XORL x l:(MOVLload [off] {sym} ptr mem)) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
- // result: (XORLmodify [off] {sym} ptr x mem)
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
+ // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- y := v.Args[1]
- if y.Op != OpAMD64XORL {
+ a := v.Args[1]
+ if a.Op != OpAMD64ADDLconst {
break
}
- _ = y.Args[1]
- x := y.Args[0]
- l := y.Args[1]
+ c := a.AuxInt
+ l := a.Args[0]
if l.Op != OpAMD64MOVLload {
break
}
break
}
_ = l.Args[1]
- if ptr != l.Args[0] {
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
+ v.reset(OpAMD64ADDLconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
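+ // In the const-op folds of this chunk the immediate and the address
+ // offset share AuxInt: makeValAndOff(c, off) packs operand and
+ // displacement into the two 32-bit halves of a ValAndOff, and
+ // validValAndOff rejects values that do not fit. A sketch: c = 1 and
+ // off = 8 give ADDLconstmodify [makeValAndOff(1, 8)] {sym} ptr mem,
+ // i.e. addl $1, 8(ptr).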
+ // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (ANDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
mem := l.Args[1]
if mem != v.Args[2] {
break
}
- if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
- v.reset(OpAMD64XORLmodify)
- v.AuxInt = off
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
v.AddArg(ptr)
- v.AddArg(x)
v.AddArg(mem)
return true
}
- // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
- // result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (ORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
a := v.Args[1]
- if a.Op != OpAMD64ADDLconst {
+ if a.Op != OpAMD64ORLconst {
break
}
c := a.AuxInt
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
- v.reset(OpAMD64ADDLconstmodify)
+ v.reset(OpAMD64ORLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
- // result: (ANDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
a := v.Args[1]
- if a.Op != OpAMD64ANDLconst {
+ if a.Op != OpAMD64XORLconst {
break
}
c := a.AuxInt
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
- v.reset(OpAMD64ANDLconstmodify)
+ v.reset(OpAMD64XORLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
- // result: (ORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (BTCLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
a := v.Args[1]
- if a.Op != OpAMD64ORLconst {
+ if a.Op != OpAMD64BTCLconst {
break
}
c := a.AuxInt
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
- v.reset(OpAMD64ORLconstmodify)
+ v.reset(OpAMD64BTCLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
- // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
- // result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ // match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (BTRLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
a := v.Args[1]
- if a.Op != OpAMD64XORLconst {
+ if a.Op != OpAMD64BTRLconst {
break
}
c := a.AuxInt
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
- v.reset(OpAMD64XORLconstmodify)
+ v.reset(OpAMD64BTRLconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (BTSLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64BTSLconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
+ break
+ }
+ v.reset(OpAMD64BTSLconstmodify)
v.AuxInt = makeValAndOff(c, off)
v.Aux = sym
v.AddArg(ptr)
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
break
}
- v.reset(OpAMD64ADDQmodify)
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ADDQ x l:(MOVQload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (ADDQmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64ADDQ {
+ break
+ }
+ _ = y.Args[1]
+ x := y.Args[0]
+ l := y.Args[1]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (SUBQmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64SUBQ {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ x := y.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBQmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (ANDQmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64ANDQ {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ x := y.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64ANDQmodify)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVQstore {sym} [off] ptr y:(ADDQ x l:(MOVQload [off] {sym} ptr mem)) mem)
+ // match: (MOVQstore {sym} [off] ptr y:(ANDQ x l:(MOVQload [off] {sym} ptr mem)) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
- // result: (ADDQmodify [off] {sym} ptr x mem)
+ // result: (ANDQmodify [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != OpAMD64ADDQ {
+ if y.Op != OpAMD64ANDQ {
break
}
_ = y.Args[1]
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
break
}
- v.reset(OpAMD64ADDQmodify)
+ v.reset(OpAMD64ANDQmodify)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
- // result: (SUBQmodify [off] {sym} ptr x mem)
+ // result: (ORQmodify [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != OpAMD64SUBQ {
+ if y.Op != OpAMD64ORQ {
break
}
_ = y.Args[1]
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
break
}
- v.reset(OpAMD64SUBQmodify)
+ v.reset(OpAMD64ORQmodify)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // match: (MOVQstore {sym} [off] ptr y:(ORQ x l:(MOVQload [off] {sym} ptr mem)) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
- // result: (ANDQmodify [off] {sym} ptr x mem)
+ // result: (ORQmodify [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != OpAMD64ANDQ {
+ if y.Op != OpAMD64ORQ {
break
}
_ = y.Args[1]
- l := y.Args[0]
+ x := y.Args[0]
+ l := y.Args[1]
if l.Op != OpAMD64MOVQload {
break
}
break
}
mem := l.Args[1]
- x := y.Args[1]
if mem != v.Args[2] {
break
}
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
break
}
- v.reset(OpAMD64ANDQmodify)
+ v.reset(OpAMD64ORQmodify)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVQstore {sym} [off] ptr y:(ANDQ x l:(MOVQload [off] {sym} ptr mem)) mem)
+ // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
- // result: (ANDQmodify [off] {sym} ptr x mem)
+ // result: (XORQmodify [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != OpAMD64ANDQ {
+ if y.Op != OpAMD64XORQ {
break
}
_ = y.Args[1]
- x := y.Args[0]
- l := y.Args[1]
+ l := y.Args[0]
if l.Op != OpAMD64MOVQload {
break
}
break
}
mem := l.Args[1]
+ x := y.Args[1]
if mem != v.Args[2] {
break
}
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
break
}
- v.reset(OpAMD64ANDQmodify)
+ v.reset(OpAMD64XORQmodify)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
+ // match: (MOVQstore {sym} [off] ptr y:(XORQ x l:(MOVQload [off] {sym} ptr mem)) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
- // result: (ORQmodify [off] {sym} ptr x mem)
+ // result: (XORQmodify [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != OpAMD64ORQ {
+ if y.Op != OpAMD64XORQ {
break
}
_ = y.Args[1]
- l := y.Args[0]
+ x := y.Args[0]
+ l := y.Args[1]
if l.Op != OpAMD64MOVQload {
break
}
break
}
mem := l.Args[1]
- x := y.Args[1]
if mem != v.Args[2] {
break
}
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
break
}
- v.reset(OpAMD64ORQmodify)
+ v.reset(OpAMD64XORQmodify)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVQstore {sym} [off] ptr y:(ORQ x l:(MOVQload [off] {sym} ptr mem)) mem)
+ // match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
- // result: (ORQmodify [off] {sym} ptr x mem)
+ // result: (BTCQmodify [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != OpAMD64ORQ {
+ if y.Op != OpAMD64BTCQ {
break
}
_ = y.Args[1]
- x := y.Args[0]
- l := y.Args[1]
+ l := y.Args[0]
if l.Op != OpAMD64MOVQload {
break
}
break
}
mem := l.Args[1]
+ x := y.Args[1]
if mem != v.Args[2] {
break
}
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
break
}
- v.reset(OpAMD64ORQmodify)
+ v.reset(OpAMD64BTCQmodify)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
- // result: (XORQmodify [off] {sym} ptr x mem)
+ // result: (BTRQmodify [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != OpAMD64XORQ {
+ if y.Op != OpAMD64BTRQ {
break
}
_ = y.Args[1]
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
break
}
- v.reset(OpAMD64XORQmodify)
+ v.reset(OpAMD64BTRQmodify)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
- // match: (MOVQstore {sym} [off] ptr y:(XORQ x l:(MOVQload [off] {sym} ptr mem)) mem)
+ // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
- // result: (XORQmodify [off] {sym} ptr x mem)
+ // result: (BTSQmodify [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
y := v.Args[1]
- if y.Op != OpAMD64XORQ {
+ if y.Op != OpAMD64BTSQ {
break
}
_ = y.Args[1]
- x := y.Args[0]
- l := y.Args[1]
+ l := y.Args[0]
if l.Op != OpAMD64MOVQload {
break
}
break
}
mem := l.Args[1]
+ x := y.Args[1]
if mem != v.Args[2] {
break
}
if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
break
}
- v.reset(OpAMD64XORQmodify)
+ v.reset(OpAMD64BTSQmodify)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
v.reset(OpAMD64ADDQconstmodify)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (ANDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
v.reset(OpAMD64ANDQconstmodify)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (ORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
v.reset(OpAMD64ORQconstmodify)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (XORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
v.reset(OpAMD64XORQconstmodify)
v.AddArg(mem)
return true
}
+ // match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (BTCQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64BTCQconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
+ break
+ }
+ v.reset(OpAMD64BTCQconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (BTRQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64BTRQconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
+ break
+ }
+ v.reset(OpAMD64BTRQconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQstore_30(v *Value) bool {
+ // match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (BTSQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64BTSQconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
+ break
+ }
+ v.reset(OpAMD64BTSQconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
// cond:
// result: (MOVSDstore [off] {sym} ptr val mem)
v.AddArg(x)
return true
}
+ // match: (ORLconst [c] (ORLconst [d] x))
+ // cond:
+ // result: (ORLconst [c | d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = c | d
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORLconst [c] (BTSLconst [d] x))
+ // cond:
+ // result: (ORLconst [c | 1<<uint32(d)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTSLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = c | 1<<uint32(d)
+ v.AddArg(x)
+ return true
+ }
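+ // A worked example of the two ORLconst folds above (a sketch): OR
+ // immediates accumulate, so (ORLconst [0xF0] (BTSLconst [0] x))
+ // rewrites to (ORLconst [0xF0 | 1<<0] x) = (ORLconst [0xF1] x).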
// match: (ORLconst [c] x)
// cond: int32(c)==0
// result: x
v.AddArg(x)
return true
}
+ // match: (ORQconst [c] (ORQconst [d] x))
+ // cond:
+ // result: (ORQconst [c | d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = c | d
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORQconst [c] (BTSQconst [d] x))
+ // cond:
+ // result: (ORQconst [c | 1<<uint32(d)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTSQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = c | 1<<uint32(d)
+ v.AddArg(x)
+ return true
+ }
// match: (ORQconst [0] x)
// cond:
// result: x
v.AddArg(x)
return true
}
+ // match: (XORLconst [c] (BTCLconst [d] x))
+ // cond:
+ // result: (XORLconst [c ^ 1<<uint32(d)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTCLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c ^ 1<<uint32(d)
+ v.AddArg(x)
+ return true
+ }
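+ // Likewise for XOR (a sketch): complementing a bit is an XOR with
+ // its mask, so (XORLconst [6] (BTCLconst [0] x)) rewrites to
+ // (XORLconst [6 ^ 1<<0] x) = (XORLconst [7] x).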
// match: (XORLconst [c] x)
// cond: int32(c)==0
// result: x
v.AddArg(x)
return true
}
+ // match: (XORQconst [c] (BTCQconst [d] x))
+ // cond:
+ // result: (XORQconst [c ^ 1<<uint32(d)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTCQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c ^ 1<<uint32(d)
+ v.AddArg(x)
+ return true
+ }
// match: (XORQconst [0] x)
// cond:
// result: x