These primitives will be used by the new And/Or sync/atomic APIs.
Implemented for mips/mipsle and mips64/mips64le.
For #61395
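
For reference, a rough sketch of how the public API from #61395 is
expected to be used once it lands (method names per the proposal;
illustrative only, not part of this CL):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        var flags atomic.Uint32
        flags.Store(0b0101)

        old := flags.Or(0b0010)        // returns the previous value
        fmt.Println(old, flags.Load()) // 5 7

        old = flags.And(0b0110)
        fmt.Println(old, flags.Load()) // 7 6
    }
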
Change-Id: Icc604a2b5cdfe72646d47d3c6a0bb49a0fd0d353
GitHub-Last-Rev: 95dca2a9f144c5d96dfa53eaf116e88be5f55040
GitHub-Pull-Request: golang/go#63297
Reviewed-on: https://go-review.googlesource.com/c/go/+/531835
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build arm || mips || mipsle || mips64 || mips64le || wasm
+//go:build arm || wasm
package atomic
//go:noescape
func Or(ptr *uint32, val uint32)
+//go:noescape
+func And32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func Or32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func And64(ptr *uint64, val uint64) uint64
+
+//go:noescape
+func Or64(ptr *uint64, val uint64) uint64
+
+//go:noescape
+func Anduintptr(ptr *uintptr, val uintptr) uintptr
+
+//go:noescape
+func Oruintptr(ptr *uintptr, val uintptr) uintptr
+
//go:noescape
func Cas64(ptr *uint64, old, new uint64) bool
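
The build constraint change above drops mips and mips64 from the generic
And/Or path, so each declaration here is now backed by a native LL/SC
implementation in the corresponding .s file. The contract they all share
(atomically AND/OR in the mask, return the previous value) can be modeled
with a plain compare-and-swap loop; a minimal sketch using only the public
sync/atomic CAS primitives, not the runtime code itself:

    package model

    import "sync/atomic"

    // or32Model mirrors what the runtime-internal Or32 promises: atomically
    // set the bits in mask and return the value that was there before.
    func or32Model(addr *uint32, mask uint32) (old uint32) {
        for {
            old = atomic.LoadUint32(addr)
            if atomic.CompareAndSwapUint32(addr, old, old|mask) {
                return old
            }
        }
    }

    // and32Model is the same loop with AND in place of OR.
    func and32Model(addr *uint32, mask uint32) (old uint32) {
        for {
            old = atomic.LoadUint32(addr)
            if atomic.CompareAndSwapUint32(addr, old, old&mask) {
                return old
            }
        }
    }
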
SYNC
RET
+// func Or32(addr *uint32, v uint32) old uint32
+TEXT ·Or32(SB), NOSPLIT, $0-20
+ MOVV ptr+0(FP), R1
+ MOVW val+8(FP), R2
+
+ SYNC
+	LL	(R1), R3	// R3 = *R1 (old value, load-linked)
+	OR	R2, R3, R4	// R4 = R3 | R2
+	SC	R4, (R1)	// try *R1 = R4; R4 = 1 if the store succeeded, 0 if not
+	BEQ	R4, -3(PC)	// store-conditional failed, retry from LL
+	SYNC
+	MOVW	R3, ret+16(FP)	// return the old value
+ RET
+
+// func And32(addr *uint32, v uint32) old uint32
+TEXT ·And32(SB), NOSPLIT, $0-20
+ MOVV ptr+0(FP), R1
+ MOVW val+8(FP), R2
+
+ SYNC
+ LL (R1), R3
+ AND R2, R3, R4
+ SC R4, (R1)
+ BEQ R4, -3(PC)
+ SYNC
+ MOVW R3, ret+16(FP)
+ RET
+
+// func Or64(addr *uint64, v uint64) old uint64
+TEXT ·Or64(SB), NOSPLIT, $0-24
+ MOVV ptr+0(FP), R1
+ MOVV val+8(FP), R2
+
+ SYNC
+ LLV (R1), R3
+ OR R2, R3, R4
+ SCV R4, (R1)
+ BEQ R4, -3(PC)
+ SYNC
+ MOVV R3, ret+16(FP)
+ RET
+
+// func And64(addr *uint64, v uint64) old uint64
+TEXT ·And64(SB), NOSPLIT, $0-24
+ MOVV ptr+0(FP), R1
+ MOVV val+8(FP), R2
+
+ SYNC
+ LLV (R1), R3
+ AND R2, R3, R4
+ SCV R4, (R1)
+ BEQ R4, -3(PC)
+ SYNC
+ MOVV R3, ret+16(FP)
+ RET
+
+// func Anduintptr(addr *uintptr, v uintptr) old uintptr
+TEXT ·Anduintptr(SB), NOSPLIT, $0-24
+ JMP ·And64(SB)
+
+// func Oruintptr(addr *uintptr, v uintptr) old uintptr
+TEXT ·Oruintptr(SB), NOSPLIT, $0-24
+ JMP ·Or64(SB)
+
// uint32 ·Load(uint32 volatile* ptr)
TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12
MOVV ptr+0(FP), R1
return
}
+// On 32-bit mips there is no 64-bit LL/SC pair, so Or64 and And64 are built
+// on top of Cas64.
+//go:nosplit
+func Or64(addr *uint64, val uint64) (old uint64) {
+ for {
+ old = *addr
+ if Cas64(addr, old, old|val) {
+ return old
+ }
+ }
+}
+
+//go:nosplit
+func And64(addr *uint64, val uint64) (old uint64) {
+ for {
+ old = *addr
+ if Cas64(addr, old, old&val) {
+ return old
+ }
+ }
+}
+
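Returning the old value is what makes these usable as test-and-set style
operations: a caller can tell whether it was the one that changed a bit.
A hypothetical illustration against the sync/atomic surface proposed in
#61395 (OrUint32 is the proposed name, assumed here for illustration):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    var flags uint32

    // claim reports whether this caller set the bit: only the caller that
    // sees the bit clear in the returned old value actually set it.
    func claim(bit uint32) bool {
        old := atomic.OrUint32(&flags, bit) // proposed #61395 API
        return old&bit == 0
    }

    func main() {
        fmt.Println(claim(1 << 3)) // true: this call set the bit
        fmt.Println(claim(1 << 3)) // false: already set
    }
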
//go:noescape
func Xadd(ptr *uint32, delta int32) uint32
//go:noescape
func Or(ptr *uint32, val uint32)
+//go:noescape
+func And32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func Or32(ptr *uint32, val uint32) uint32
+
+//go:noescape
+func Anduintptr(ptr *uintptr, val uintptr) uintptr
+
+//go:noescape
+func Oruintptr(ptr *uintptr, val uintptr) uintptr
+
//go:noescape
func Store(ptr *uint32, val uint32)
SYNC
RET
+// func Or32(addr *uint32, v uint32) old uint32
+TEXT ·Or32(SB), NOSPLIT, $0-12
+ MOVW ptr+0(FP), R1
+ MOVW val+4(FP), R2
+
+ SYNC
+	LL	(R1), R3	// R3 = *R1 (old value, load-linked)
+	OR	R2, R3, R4	// R4 = R3 | R2
+	SC	R4, (R1)	// try *R1 = R4; R4 = 1 if the store succeeded, 0 if not
+	BEQ	R4, -4(PC)	// store-conditional failed, branch back and retry
+	SYNC
+	MOVW	R3, ret+8(FP)	// return the old value
+ RET
+
+// func And32(addr *uint32, v uint32) old uint32
+TEXT ·And32(SB), NOSPLIT, $0-12
+ MOVW ptr+0(FP), R1
+ MOVW val+4(FP), R2
+
+ SYNC
+ LL (R1), R3
+ AND R2, R3, R4
+ SC R4, (R1)
+ BEQ R4, -4(PC)
+ SYNC
+ MOVW R3, ret+8(FP)
+ RET
+
+// func Anduintptr(addr *uintptr, v uintptr) old uintptr
+TEXT ·Anduintptr(SB), NOSPLIT, $0-12
+ JMP ·And32(SB)
+
+// func Oruintptr(addr *uintptr, v uintptr) old uintptr
+TEXT ·Oruintptr(SB), NOSPLIT, $0-12
+ JMP ·Or32(SB)
+
TEXT ·spinLock(SB),NOSPLIT,$0-4
MOVW state+0(FP), R1
MOVW $1, R2