p5.From.Reg = out
}
- case ssa.OpPPC64LoweredAtomicExchange32,
+ case ssa.OpPPC64LoweredAtomicExchange8,
+ ssa.OpPPC64LoweredAtomicExchange32,
ssa.OpPPC64LoweredAtomicExchange64:
// LWSYNC
- // LDAR/LWAR (Rarg0), Rout
- // STDCCC/STWCCC Rout, (Rarg0)
+ // LDAR/LWAR/LBAR (Rarg0), Rout
+ // STDCCC/STWCCC/STBCCC Rarg1, (Rarg0)
// BNE -2(PC)
// ISYNC
ld := ppc64.ALDAR
st := ppc64.ASTDCCC
- if v.Op == ssa.OpPPC64LoweredAtomicExchange32 {
+ switch v.Op {
+ case ssa.OpPPC64LoweredAtomicExchange8:
+ ld = ppc64.ALBAR
+ st = ppc64.ASTBCCC
+ case ssa.OpPPC64LoweredAtomicExchange32:
ld = ppc64.ALWAR
st = ppc64.ASTWCCC
}
// caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
plwsync := s.Prog(ppc64.ALWSYNC)
plwsync.To.Type = obj.TYPE_NONE
- // LDAR or LWAR
+ // L[B|W|D]AR
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = out
- // STDCCC or STWCCC
+ // ST[B|W|D]CCC
p1 := s.Prog(st)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
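The hunk ends with the store-conditional's source register; the rest of the case presumably mirrors the existing 32/64-bit handling, setting the store's memory destination and then emitting the retry branch and trailing ISYNC. A rough sketch of that continuation, reusing the names from the hunk above (p, p1, r0, s) and offered as an assumption rather than the exact committed code:

	p1.To.Type = obj.TYPE_MEM
	p1.To.Reg = r0
	// BNE -2(PC): if the ST*CCC lost the reservation taken by the
	// L*AR, branch back to the load-reserve and retry the exchange.
	p2 := s.Prog(ppc64.ABNE)
	p2.To.Type = obj.TYPE_BRANCH
	p2.To.SetTarget(p)
	// ISYNC prevents later loads from being satisfied before the loop
	// completes (acquire side); the LWSYNC above provides the release side.
	pisync := s.Prog(ppc64.AISYNC)
	pisync.To.Type = obj.TYPE_NONE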
(AtomicStore(8|32|64) ptr val mem) => (LoweredAtomicStore(8|32|64) [1] ptr val mem)
(AtomicStoreRel(32|64) ptr val mem) => (LoweredAtomicStore(32|64) [0] ptr val mem)
-(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
+(AtomicExchange(8|32|64) ...) => (LoweredAtomicExchange(8|32|64) ...)
(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
{name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
- // atomic exchange32, 64
+ // atomic exchange8, 32, 64
// LWSYNC
// LDAR (Rarg0), Rout
// STDCCC Rarg1, (Rarg0)
// BNE -2(PC)
// ISYNC
// return old val
+ {name: "LoweredAtomicExchange8", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
OpPPC64LoweredAtomicLoadPtr
OpPPC64LoweredAtomicAdd32
OpPPC64LoweredAtomicAdd64
+ OpPPC64LoweredAtomicExchange8
OpPPC64LoweredAtomicExchange32
OpPPC64LoweredAtomicExchange64
OpPPC64LoweredAtomicCas64
},
},
},
+ {
+ name: "LoweredAtomicExchange8",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
{
name: "LoweredAtomicExchange32",
argLen: 3,
case OpAtomicExchange64:
v.Op = OpPPC64LoweredAtomicExchange64
return true
+ case OpAtomicExchange8:
+ v.Op = OpPPC64LoweredAtomicExchange8
+ return true
case OpAtomicLoad32:
return rewriteValuePPC64_OpAtomicLoad32(v)
case OpAtomicLoad64:
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
},
- sys.AMD64)
+ sys.AMD64, sys.PPC64)
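The two lines above are the tail of the Xchg8 intrinsic registration; the full registration presumably has the same shape as the Xchg/Xchg64 ones that follow, only with the 8-bit op and result type. A sketch of the whole addF call, inferred from the fragment shown:

	addF("internal/runtime/atomic", "Xchg8",
		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
			// Build the generic 8-bit exchange; it yields a (uint8, mem) tuple.
			v := s.newValue3(ssa.OpAtomicExchange8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], args[1], s.mem())
			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
			return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
		},
		sys.AMD64, sys.PPC64)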
addF("internal/runtime/atomic", "Xchg",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
{"ppc64", "internal/runtime/atomic", "Xaddint32"}: struct{}{},
{"ppc64", "internal/runtime/atomic", "Xaddint64"}: struct{}{},
{"ppc64", "internal/runtime/atomic", "Xadduintptr"}: struct{}{},
+ {"ppc64", "internal/runtime/atomic", "Xchg8"}: struct{}{},
{"ppc64", "internal/runtime/atomic", "Xchg"}: struct{}{},
{"ppc64", "internal/runtime/atomic", "Xchg64"}: struct{}{},
{"ppc64", "internal/runtime/atomic", "Xchgint32"}: struct{}{},
{"ppc64le", "internal/runtime/atomic", "Xaddint32"}: struct{}{},
{"ppc64le", "internal/runtime/atomic", "Xaddint64"}: struct{}{},
{"ppc64le", "internal/runtime/atomic", "Xadduintptr"}: struct{}{},
+ {"ppc64le", "internal/runtime/atomic", "Xchg8"}: struct{}{},
{"ppc64le", "internal/runtime/atomic", "Xchg"}: struct{}{},
{"ppc64le", "internal/runtime/atomic", "Xchg64"}: struct{}{},
{"ppc64le", "internal/runtime/atomic", "Xchgint32"}: struct{}{},
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
+//go:noescape
+func Xchg8(ptr *uint8, new uint8) uint8
+
//go:noescape
func Xchg(ptr *uint32, new uint32) uint32
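With the declaration in place, runtime-internal code can exchange a single byte without resorting to a 32-bit CAS loop. A hypothetical caller for illustration only (the function name and flag are invented here, and internal/runtime/atomic is not importable outside the runtime):

	package p // hypothetical; usable only from within the runtime module

	import "internal/runtime/atomic"

	// tryClaim atomically swaps 1 into *flag and reports whether this
	// caller observed 0 there, i.e. whether it won the claim.
	func tryClaim(flag *uint8) bool {
		return atomic.Xchg8(flag, 1) == 0
	}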
MOVD R3, ret+16(FP)
RET
+// uint8 Xchg8(ptr *uint8, new uint8)
+// Atomically:
+// old := *ptr;
+// *ptr = new;
+// return old;
+TEXT ·Xchg8(SB), NOSPLIT, $0-17
+ MOVD ptr+0(FP), R4
+ MOVB new+8(FP), R5
+ LWSYNC
+ LBAR (R4), R3
+ STBCCC R5, (R4)
+ BNE -2(PC)
+ ISYNC
+ MOVB R3, ret+16(FP)
+ RET
+
// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
// old := *ptr;
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build amd64
+//go:build amd64 || ppc64 || ppc64le
package atomic_test
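The build-tag change enables the existing Xchg8 tests on ppc64 and ppc64le. For flavour, a minimal sketch of the kind of check such a test performs, assuming the usual imports of testing and internal/runtime/atomic; the test name and body here are assumptions, not the file's actual contents:

	func TestXchg8Sketch(t *testing.T) {
		var buf [4]uint8
		buf[1], buf[2] = 0xaa, 0xbb
		if old := atomic.Xchg8(&buf[1], 0x11); old != 0xaa {
			t.Fatalf("Xchg8 returned %#x, want 0xaa", old)
		}
		if buf[1] != 0x11 {
			t.Fatalf("Xchg8 stored %#x, want 0x11", buf[1])
		}
		// The byte exchange must not disturb neighbouring bytes.
		if buf[0] != 0 || buf[2] != 0xbb || buf[3] != 0 {
			t.Fatalf("adjacent bytes changed: %#v", buf)
		}
	}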