&& clobber(mem2)
-> (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
-// This is somewhat tricky. There may be pointers in SSE registers due to the rule below.
-// However, those registers shouldn't live across a GC safepoint.
-(MOVQstore [i] {s} p
- x1:(MOVQload [j] {s2} p2 mem)
- mem2:(MOVQstore [i-8] {s} p
- x2:(MOVQload [j-8] {s2} p2 mem) mem))
- && x1.Uses == 1
- && x2.Uses == 1
- && mem2.Uses == 1
- && config.useSSE
- && clobber(x1)
- && clobber(x2)
- && clobber(mem2)
- -> (MOVOstore [i-8] {s} p (MOVOload [j-8] {s2} p2 mem) mem)
-
-
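For reference, the rule deleted above fused two adjacent 8-byte load/store pairs into a single 16-byte SSE move, guarded by config.useSSE; the deleted comment notes that this can put pointers into SSE registers, which must not live across a GC safepoint. A hypothetical Go snippet of the kind that could have matched it (illustrative only; actual codegen depends on compiler version and flags):

    package p

    // copy16 copies 16 bytes as two 8-byte words. With the rule above in
    // place, the two MOVQload/MOVQstore pairs at offsets 0 and 8 could be
    // fused into one MOVOload/MOVOstore through an SSE register.
    func copy16(dst, src *[2]uint64) {
        dst[0] = src[0]
        dst[1] = src[1]
    }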
// amd64p32 rules
// same as the rules above, but with 32 instead of 64 bit pointer arithmetic.
// LEAQ,ADDQ -> LEAL,ADDL
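The amd64p32 variants referred to here follow mechanically from the 64-bit rules. As a hedged sketch (not the verbatim rule text), the ADDQconst offset-folding rule that appears further below would translate along these lines:

    (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2)
        -> (MOVQstore [off1+off2] {sym} ptr val mem)

The remainder of the diff is the generated side of the change, in rewriteAMD64.go: with the SSE rule gone, OpAMD64MOVQstore is left with few enough rules that the generator emits a single matcher function, so the _10 overflow function disappears and the dispatch collapses to one call.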
case OpAMD64MOVQloadidx8:
return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
case OpAMD64MOVQstore:
- return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v)
+ return rewriteValueAMD64_OpAMD64MOVQstore_0(v)
case OpAMD64MOVQstoreconst:
return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
case OpAMD64MOVQstoreconstidx1:
return false
}
func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool {
- b := v.Block
- _ = b
- config := b.Func.Config
- _ = config
// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVQstore [off1+off2] {sym} ptr val mem)
v.AddArg(mem)
return true
}
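As a concrete illustration of the offset-folding rule above (hypothetical source; the assembly in the comment is an assumption about typical output):

    package p

    // The address &p[1] is p plus a constant 8, an ADDQconst at the SSA
    // level. The rule folds that constant into the store's displacement,
    // provided the combined offset still fits in a signed 32-bit immediate
    // (the is32Bit condition), giving MOVQ x, 8(p) rather than a separate
    // add followed by MOVQ x, (p).
    func setSecond(p *[2]uint64, x uint64) {
        p[1] = x
    }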
- // match: (MOVQstore [i] {s} p x1:(MOVQload [j] {s2} p2 mem) mem2:(MOVQstore [i-8] {s} p x2:(MOVQload [j-8] {s2} p2 mem) mem))
- // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && config.useSSE && clobber(x1) && clobber(x2) && clobber(mem2)
- // result: (MOVOstore [i-8] {s} p (MOVOload [j-8] {s2} p2 mem) mem)
- for {
- i := v.AuxInt
- s := v.Aux
- _ = v.Args[2]
- p := v.Args[0]
- x1 := v.Args[1]
- if x1.Op != OpAMD64MOVQload {
- break
- }
- j := x1.AuxInt
- s2 := x1.Aux
- _ = x1.Args[1]
- p2 := x1.Args[0]
- mem := x1.Args[1]
- mem2 := v.Args[2]
- if mem2.Op != OpAMD64MOVQstore {
- break
- }
- if mem2.AuxInt != i-8 {
- break
- }
- if mem2.Aux != s {
- break
- }
- _ = mem2.Args[2]
- if p != mem2.Args[0] {
- break
- }
- x2 := mem2.Args[1]
- if x2.Op != OpAMD64MOVQload {
- break
- }
- if x2.AuxInt != j-8 {
- break
- }
- if x2.Aux != s2 {
- break
- }
- _ = x2.Args[1]
- if p2 != x2.Args[0] {
- break
- }
- if mem != x2.Args[1] {
- break
- }
- if mem != mem2.Args[2] {
- break
- }
- if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && config.useSSE && clobber(x1) && clobber(x2) && clobber(mem2)) {
- break
- }
- v.reset(OpAMD64MOVOstore)
- v.AuxInt = i - 8
- v.Aux = s
- v.AddArg(p)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
- v0.AuxInt = j - 8
- v0.Aux = s2
- v0.AddArg(p2)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
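The removed matcher has the standard generated shape: check each Op, AuxInt, and Aux, verify the shared arguments, then rewrite only if the condition holds. The Uses == 1 guards ensure the two loads and the inner store feed nothing but this pattern, and clobber invalidates them so they die with the rewrite; its definition in rewrite.go at the time was essentially this (quoted from memory, treat as a sketch):

    // clobber invalidates v. It returns true so that it can be used
    // inline as part of a rule's boolean condition.
    func clobber(v *Value) bool {
        v.reset(OpInvalid)
        // Note: leave v.Block intact. The Block field is used after clobber.
        return true
    }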
// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
v.AddArg(mem)
return true
}
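The LEAL rule above is one of the amd64p32 cases mentioned earlier: it merges a symbol-relative address computation into the store via canMergeSym/mergeSym. A rough sketch of source that could exercise it (hypothetical):

    package p

    var g [4]uint64 // package-level, addressed through a symbol

    // On amd64p32 the address of g[1] is formed with a 32-bit LEAL
    // carrying the symbol g and offset 8; the rule folds both straight
    // into the MOVQstore's Aux and AuxInt.
    func setG(x uint64) {
        g[1] = x
    }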
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool {
// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
// cond:
// result: (MOVSDstore [off] {sym} ptr val mem)
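This rule avoids a round trip through an integer register when the stored bits originate in an SSE register. For example (illustrative):

    package p

    import "math"

    // f's bits are produced in an SSE register; *p = math.Float64bits(f)
    // would otherwise be MOVQf2i (SSE to integer register) followed by a
    // MOVQstore. The rule stores the SSE register directly with
    // MOVSDstore, which writes the same 8 bytes.
    func storeBits(p *uint64, f float64) {
        *p = math.Float64bits(f)
    }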