(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
(MOV(B|BU|H|HU|W|WU|D)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOV(W|D)load [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
+ is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
+ (base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOV(W|D)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
(MOV(B|H|W|D)store [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) &&
is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOV(B|H|W|D)storezero [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
	is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|D)storezero [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOV(W|D)store [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) &&
+ is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
+ (base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOV(W|D)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
(MOV(B|BU|H|HU|W|WU|D)load [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
(MOV(B|BU|H|HU|W|WU|D)load [off1+int32(off2)] {sym} base mem)
+(FMOV(W|D)load [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (FMOV(W|D)load [off1+int32(off2)] {sym} base mem)
+
(MOV(B|H|W|D)store [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
(MOV(B|H|W|D)store [off1+int32(off2)] {sym} base val mem)
(MOV(B|H|W|D)storezero [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
(MOV(B|H|W|D)storezero [off1+int32(off2)] {sym} base mem)
+(FMOV(W|D)store [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (FMOV(W|D)store [off1+int32(off2)] {sym} base val mem)
+
// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
// with OffPtr -> ADDI.
(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
(MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)
+// Replace load from same location as preceding store with copy.
+(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXD x)
+(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVDX x)
+(MOVWload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXS x)
+(MOVWUload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWUreg (FMVXS x))
+(FMOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVSX x)
+
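// Aside (not part of this change): a minimal sketch, assuming the usual
// unsafe-pointer implementation of math.Float64bits, of the kind of Go code the
// store/load forwarding rules above target. Without them, the reinterpretation
// can compile to an FMOVD store followed by a MOVD load of the same stack slot;
// with them, it can become a single FMVXD register move.
//
//	package main
//
//	import (
//		"fmt"
//		"math"
//	)
//
//	func bits(f float64) uint64 {
//		return math.Float64bits(f) // effectively *(*uint64)(unsafe.Pointer(&f))
//	}
//
//	func main() {
//		fmt.Printf("%#x\n", bits(1.0)) // prints 0x3ff0000000000000
//	}
//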
// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVnop does not emit an instruction; it exists only to ensure the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
{name: "FNMSUBS", argLength: 3, reg: fp31, asm: "FNMSUBS", commutative: true, typ: "Float32"}, // -(arg0 * arg1) - arg2
{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS", typ: "Float32"}, // sqrt(arg0)
{name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS", typ: "Float32"}, // -arg0
- {name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float
+ {name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float32
+ {name: "FMVXS", argLength: 1, reg: fpgp, asm: "FMVXS", typ: "Int32"}, // reinterpret arg0 as int32, sign extended to 64 bits
{name: "FCVTSW", argLength: 1, reg: gpfp, asm: "FCVTSW", typ: "Float32"}, // float32(low 32 bits of arg0)
{name: "FCVTSL", argLength: 1, reg: gpfp, asm: "FCVTSL", typ: "Float32"}, // float32(arg0)
{name: "FCVTWS", argLength: 1, reg: fpgp, asm: "FCVTWS", typ: "Int32"}, // int32(arg0)
{name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD", typ: "Float64"}, // -arg0
{name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD", typ: "Float64"}, // abs(arg0)
{name: "FSGNJD", argLength: 2, reg: fp21, asm: "FSGNJD", typ: "Float64"}, // copy sign of arg1 to arg0
- {name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float
+ {name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float64
+ {name: "FMVXD", argLength: 1, reg: fpgp, asm: "FMVXD", typ: "Int64"}, // reinterpret arg0 as int64
{name: "FCVTDW", argLength: 1, reg: gpfp, asm: "FCVTDW", typ: "Float64"}, // float64(low 32 bits of arg0)
{name: "FCVTDL", argLength: 1, reg: gpfp, asm: "FCVTDL", typ: "Float64"}, // float64(arg0)
{name: "FCVTWD", argLength: 1, reg: fpgp, asm: "FCVTWD", typ: "Int32"}, // int32(arg0)
return rewriteValueRISCV64_OpRISCV64FMADDD(v)
case OpRISCV64FMADDS:
return rewriteValueRISCV64_OpRISCV64FMADDS(v)
+ case OpRISCV64FMOVDload:
+ return rewriteValueRISCV64_OpRISCV64FMOVDload(v)
+ case OpRISCV64FMOVDstore:
+ return rewriteValueRISCV64_OpRISCV64FMOVDstore(v)
+ case OpRISCV64FMOVWload:
+ return rewriteValueRISCV64_OpRISCV64FMOVWload(v)
+ case OpRISCV64FMOVWstore:
+ return rewriteValueRISCV64_OpRISCV64FMOVWstore(v)
case OpRISCV64FMSUBD:
return rewriteValueRISCV64_OpRISCV64FMSUBD(v)
case OpRISCV64FMSUBS:
}
return false
}
+func rewriteValueRISCV64_OpRISCV64FMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (FMOVDload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (FMVDX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64FMVDX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (FMOVDstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (FMOVWload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (FMOVWload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (FMOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (FMVSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64FMVSX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (FMOVWstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (FMOVWstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64FMSUBD(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v.AddArg2(base, mem)
return true
}
+ // match: (MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (FMVXD x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64FMVXD)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVDnop(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
+ typ := &b.Func.Config.Types
// match: (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
v.AddArg2(base, mem)
return true
}
+ // match: (MOVWUload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVWUreg (FMVXS x))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64FMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUreg)
+ v0 := b.NewValue0(v_1.Pos, OpRISCV64FMVXS, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVWUreg(v *Value) bool {
v.AddArg2(base, mem)
return true
}
+ // match: (MOVWload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (FMVXS x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64FMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64FMVXS)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVWreg(v *Value) bool {