s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[TUINT8], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
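For orientation, these intrinsics are also reachable from ordinary user code: ssa.go aliases the sync/atomic entry points onto runtime/internal/atomic, so a program like the following minimal sketch should now compile to the single-instruction forms on riscv64 rather than runtime calls (the exact instruction selection is the assumption being illustrated, per the lowering below):

package main

import "sync/atomic"

var ready uint32

func publish() {
	// Expected to lower to an AMOSWAPW store on riscv64.
	atomic.StoreUint32(&ready, 1)
}

func observe() uint32 {
	// Expected to lower to an LRW load on riscv64.
	return atomic.LoadUint32(&ready)
}

func main() {
	publish()
	_ = observe()
}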
addF("runtime/internal/atomic", "StorepNoWB",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
p.To.Reg = v.Reg0()
s.Prog(riscv.AFENCE)
+ case ssa.OpRISCV64LoweredAtomicLoad32, ssa.OpRISCV64LoweredAtomicLoad64:
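+ // 32- and 64-bit atomic loads use a single load-reserved instruction (LRW/LRD), avoiding the FENCE pairs needed for the 8-bit case.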
+ as := riscv.ALRW
+ if v.Op == ssa.OpRISCV64LoweredAtomicLoad64 {
+ as = riscv.ALRD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
case ssa.OpRISCV64LoweredAtomicStore8:
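// The A extension has no byte-wide AMO, so an 8-bit atomic store is a plain byte store bracketed by FENCE instructions.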
s.Prog(riscv.AFENCE)
p := s.Prog(riscv.AMOVB)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
s.Prog(riscv.AFENCE)
+ case ssa.OpRISCV64LoweredAtomicStore32, ssa.OpRISCV64LoweredAtomicStore64:
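+ // 32- and 64-bit atomic stores swap the new value into memory with AMOSWAPW/AMOSWAPD, discarding the old value by targeting the zero register (RegTo2 below).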
+ as := riscv.AAMOSWAPW
+ if v.Op == ssa.OpRISCV64LoweredAtomicStore64 {
+ as = riscv.AAMOSWAPD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.RegTo2 = riscv.REG_ZERO
+
case ssa.OpRISCV64LoweredZero:
mov, sz := largestMove(v.AuxInt)
(InterCall ...) -> (CALLinter ...)
// Atomic Intrinsics
-(AtomicLoad8 ...) -> (LoweredAtomicLoad8 ...)
-
-(AtomicStore8 ...) -> (LoweredAtomicStore8 ...)
+(AtomicLoad8 ...) -> (LoweredAtomicLoad8 ...)
+(AtomicLoad32 ...) -> (LoweredAtomicLoad32 ...)
+(AtomicLoad64 ...) -> (LoweredAtomicLoad64 ...)
+(AtomicLoadPtr ...) -> (LoweredAtomicLoad64 ...)
+
+(AtomicStore8 ...) -> (LoweredAtomicStore8 ...)
+(AtomicStore32 ...) -> (LoweredAtomicStore32 ...)
+(AtomicStore64 ...) -> (LoweredAtomicStore64 ...)
+(AtomicStorePtrNoWB ...) -> (LoweredAtomicStore64 ...)
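+// The "..." forms above substitute the op while passing arguments and aux fields through unchanged; the generated rewrites in rewriteRISCV64.go below therefore reduce to plain v.Op assignments.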
// Optimizations
// Atomic loads.
// load from arg0. arg1=mem.
// returns <value,memory> so they can be properly ordered with other loads.
{name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true},
// Atomic stores.
// store arg1 to arg0. arg2=mem. returns memory.
{name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
// Lowering pass-throughs
{name: "LoweredNilCheck", argLength: 2, faultOnNilArg0: true, nilCheck: true, reg: regInfo{inputs: []regMask{gpspMask}}}, // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
OpRISCV64LoweredZero
OpRISCV64LoweredMove
OpRISCV64LoweredAtomicLoad8
+ OpRISCV64LoweredAtomicLoad32
+ OpRISCV64LoweredAtomicLoad64
OpRISCV64LoweredAtomicStore8
+ OpRISCV64LoweredAtomicStore32
+ OpRISCV64LoweredAtomicStore64
OpRISCV64LoweredNilCheck
OpRISCV64LoweredGetClosurePtr
OpRISCV64LoweredGetCallerSP
},
},
},
+ {
+ name: "LoweredAtomicLoad32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad64",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
{
name: "LoweredAtomicStore8",
argLen: 3,
},
},
},
+ {
+ name: "LoweredAtomicStore32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore64",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
{
name: "LoweredNilCheck",
argLen: 2,
case OpAndB:
v.Op = OpRISCV64AND
return true
+ case OpAtomicLoad32:
+ v.Op = OpRISCV64LoweredAtomicLoad32
+ return true
+ case OpAtomicLoad64:
+ v.Op = OpRISCV64LoweredAtomicLoad64
+ return true
case OpAtomicLoad8:
v.Op = OpRISCV64LoweredAtomicLoad8
return true
+ case OpAtomicLoadPtr:
+ v.Op = OpRISCV64LoweredAtomicLoad64
+ return true
+ case OpAtomicStore32:
+ v.Op = OpRISCV64LoweredAtomicStore32
+ return true
+ case OpAtomicStore64:
+ v.Op = OpRISCV64LoweredAtomicStore64
+ return true
case OpAtomicStore8:
v.Op = OpRISCV64LoweredAtomicStore8
return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpRISCV64LoweredAtomicStore64
+ return true
case OpAvg64u:
return rewriteValueRISCV64_OpAvg64u(v)
case OpClosureCall: