From 192d65e46b38381653ccbe16cac49f7fa36aac93 Mon Sep 17 00:00:00 2001
From: Mauri de Souza Meneguzzo
Date: Thu, 16 May 2024 22:12:47 +0000
Subject: [PATCH] sync/atomic: public And/Or ops and race instrumentation

This CL implements the new sync/atomic And and Or APIs as well as
their race-instrumented counterparts.

Fixes #61395

Change-Id: I294eefe4b3ac27bc4ed237edcbfa88a8c646d86f
GitHub-Last-Rev: f174297007c7b81b1ff4a687ef23d955a3ffd4db
GitHub-Pull-Request: golang/go#64331
Reviewed-on: https://go-review.googlesource.com/c/go/+/544455
Reviewed-by: Keith Randall
Reviewed-by: Austin Clements
Auto-Submit: Austin Clements
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Keith Randall
---
 api/next/61395.txt                            |  20 +
 .../6-stdlib/99-minor/sync/atomic/61395.md    |   3 +
 src/runtime/race.go                           |  34 ++
 src/runtime/race_amd64.s                      |  51 ++
 src/runtime/race_arm64.s                      |  50 ++
 src/runtime/race_ppc64le.s                    |  46 ++
 src/runtime/race_s390x.s                      |  50 ++
 src/sync/atomic/asm.s                         |  30 ++
 src/sync/atomic/atomic_test.go                | 466 ++++++++++++++++++
 src/sync/atomic/doc.go                        |  50 ++
 src/sync/atomic/type.go                       |  40 ++
 11 files changed, 840 insertions(+)
 create mode 100644 api/next/61395.txt
 create mode 100644 doc/next/6-stdlib/99-minor/sync/atomic/61395.md

diff --git a/api/next/61395.txt b/api/next/61395.txt
new file mode 100644
index 0000000000..0efca67b62
--- /dev/null
+++ b/api/next/61395.txt
@@ -0,0 +1,20 @@
+pkg sync/atomic, func AndInt32(*int32, int32) int32 #61395
+pkg sync/atomic, func AndInt64(*int64, int64) int64 #61395
+pkg sync/atomic, func AndUint32(*uint32, uint32) uint32 #61395
+pkg sync/atomic, func AndUint64(*uint64, uint64) uint64 #61395
+pkg sync/atomic, func AndUintptr(*uintptr, uintptr) uintptr #61395
+pkg sync/atomic, func OrInt32(*int32, int32) int32 #61395
+pkg sync/atomic, func OrInt64(*int64, int64) int64 #61395
+pkg sync/atomic, func OrUint32(*uint32, uint32) uint32 #61395
+pkg sync/atomic, func OrUint64(*uint64, uint64) uint64 #61395
+pkg sync/atomic, func OrUintptr(*uintptr, uintptr) uintptr #61395
+pkg sync/atomic, method (*Int32) And(int32) int32 #61395
+pkg sync/atomic, method (*Int64) And(int64) int64 #61395
+pkg sync/atomic, method (*Uint32) And(uint32) uint32 #61395
+pkg sync/atomic, method (*Uint64) And(uint64) uint64 #61395
+pkg sync/atomic, method (*Uintptr) And(uintptr) uintptr #61395
+pkg sync/atomic, method (*Int32) Or(int32) int32 #61395
+pkg sync/atomic, method (*Int64) Or(int64) int64 #61395
+pkg sync/atomic, method (*Uint32) Or(uint32) uint32 #61395
+pkg sync/atomic, method (*Uint64) Or(uint64) uint64 #61395
+pkg sync/atomic, method (*Uintptr) Or(uintptr) uintptr #61395
diff --git a/doc/next/6-stdlib/99-minor/sync/atomic/61395.md b/doc/next/6-stdlib/99-minor/sync/atomic/61395.md
new file mode 100644
index 0000000000..05359347f4
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/sync/atomic/61395.md
@@ -0,0 +1,3 @@
+
+The new [`atomic.And`](/pkg/sync/atomic#And) and [`atomic.Or`](/pkg/sync/atomic#Or)
+operators apply a bitwise `AND` or `OR` to the given input, returning the old value.
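For illustration only, not part of the patch: a minimal sketch of the new API in use. Both the typed methods and the package-level functions return the value the word held before the operation, so callers can set or clear flag bits and still observe the prior state. The flag names below are hypothetical.

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// flagReady and flagDone are hypothetical flag bits, for illustration.
	const (
		flagReady uint32 = 1 << 0
		flagDone  uint32 = 1 << 1
	)

	func main() {
		var flags atomic.Uint32

		old := flags.Or(flagReady) // set flagReady; returns the previous value
		fmt.Printf("old=%#x now=%#x\n", old, flags.Load()) // old=0x0 now=0x1

		old = flags.And(^flagReady) // clear flagReady; again returns the old value
		fmt.Printf("old=%#x now=%#x\n", old, flags.Load()) // old=0x1 now=0x0

		// The package-level functions mirror the methods:
		var x uint32
		atomic.OrUint32(&x, flagDone)
		fmt.Println(atomic.AndUint32(&x, 0)) // prints 2, the value before clearing
	}

Note that clearing bits takes the complement of the mask, And(^mask), mirroring the non-atomic x &^= mask idiom.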
diff --git a/src/runtime/race.go b/src/runtime/race.go index 9acc0c6920..7d5cbce49e 100644 --- a/src/runtime/race.go +++ b/src/runtime/race.go @@ -323,6 +323,10 @@ var __tsan_report_count byte //go:cgo_import_static __tsan_go_atomic64_exchange //go:cgo_import_static __tsan_go_atomic32_fetch_add //go:cgo_import_static __tsan_go_atomic64_fetch_add +//go:cgo_import_static __tsan_go_atomic32_fetch_and +//go:cgo_import_static __tsan_go_atomic64_fetch_and +//go:cgo_import_static __tsan_go_atomic32_fetch_or +//go:cgo_import_static __tsan_go_atomic64_fetch_or //go:cgo_import_static __tsan_go_atomic32_compare_exchange //go:cgo_import_static __tsan_go_atomic64_compare_exchange @@ -642,6 +646,36 @@ func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64) //go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr) +//go:linkname abigen_sync_atomic_AndInt32 sync/atomic.AndInt32 +func abigen_sync_atomic_AndInt32(addr *int32, mask int32) (old int32) + +//go:linkname abigen_sync_atomic_AndUint32 sync/atomic.AndUint32 +func abigen_sync_atomic_AndUint32(addr *uint32, mask uint32) (old uint32) + +//go:linkname abigen_sync_atomic_AndInt64 sync/atomic.AndInt64 +func abigen_sync_atomic_AndInt64(addr *int64, mask int64) (old int64) + +//go:linkname abigen_sync_atomic_AndUint64 sync/atomic.AndUint64 +func abigen_sync_atomic_AndUint64(addr *uint64, mask uint64) (old uint64) + +//go:linkname abigen_sync_atomic_AndUintptr sync/atomic.AndUintptr +func abigen_sync_atomic_AndUintptr(addr *uintptr, mask uintptr) (old uintptr) + +//go:linkname abigen_sync_atomic_OrInt32 sync/atomic.OrInt32 +func abigen_sync_atomic_OrInt32(addr *int32, mask int32) (old int32) + +//go:linkname abigen_sync_atomic_OrUint32 sync/atomic.OrUint32 +func abigen_sync_atomic_OrUint32(addr *uint32, mask uint32) (old uint32) + +//go:linkname abigen_sync_atomic_OrInt64 sync/atomic.OrInt64 +func abigen_sync_atomic_OrInt64(addr *int64, mask int64) (old int64) + +//go:linkname abigen_sync_atomic_OrUint64 sync/atomic.OrUint64 +func abigen_sync_atomic_OrUint64(addr *uint64, mask uint64) (old uint64) + +//go:linkname abigen_sync_atomic_OrUintptr sync/atomic.OrUintptr +func abigen_sync_atomic_OrUintptr(addr *uintptr, mask uintptr) (old uintptr) + //go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32 func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool) diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s index 45c1255509..c4a6d49316 100644 --- a/src/runtime/race_amd64.s +++ b/src/runtime/race_amd64.s @@ -303,6 +303,57 @@ TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24 GO_ARGS JMP sync∕atomic·AddInt64(SB) +// And +TEXT sync∕atomic·AndInt32(SB), NOSPLIT|NOFRAME, $0-20 + GO_ARGS + MOVQ $__tsan_go_atomic32_fetch_and(SB), AX + CALL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·AndInt64(SB), NOSPLIT|NOFRAME, $0-24 + GO_ARGS + MOVQ $__tsan_go_atomic64_fetch_and(SB), AX + CALL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20 + GO_ARGS + JMP sync∕atomic·AndInt32(SB) + +TEXT sync∕atomic·AndUint64(SB), NOSPLIT, $0-24 + GO_ARGS + JMP sync∕atomic·AndInt64(SB) + +TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24 + GO_ARGS + JMP sync∕atomic·AndInt64(SB) + +// Or +TEXT sync∕atomic·OrInt32(SB), NOSPLIT|NOFRAME, $0-20 + GO_ARGS + MOVQ $__tsan_go_atomic32_fetch_or(SB), AX + CALL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·OrInt64(SB), NOSPLIT|NOFRAME, $0-24 + GO_ARGS + 
MOVQ $__tsan_go_atomic64_fetch_or(SB), AX + CALL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20 + GO_ARGS + JMP sync∕atomic·OrInt32(SB) + +TEXT sync∕atomic·OrUint64(SB), NOSPLIT, $0-24 + GO_ARGS + JMP sync∕atomic·OrInt64(SB) + +TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24 + GO_ARGS + JMP sync∕atomic·OrInt64(SB) + + // CompareAndSwap TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT|NOFRAME, $0-17 GO_ARGS diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s index ae0030cf10..c42a6c1377 100644 --- a/src/runtime/race_arm64.s +++ b/src/runtime/race_arm64.s @@ -312,6 +312,56 @@ TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24 GO_ARGS JMP sync∕atomic·AddInt64(SB) +// And +TEXT sync∕atomic·AndInt32(SB), NOSPLIT, $0-20 + GO_ARGS + MOVD $__tsan_go_atomic32_fetch_and(SB), R9 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·AndInt64(SB), NOSPLIT, $0-24 + GO_ARGS + MOVD $__tsan_go_atomic64_fetch_and(SB), R9 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20 + GO_ARGS + JMP sync∕atomic·AndInt32(SB) + +TEXT sync∕atomic·AndUint64(SB), NOSPLIT, $0-24 + GO_ARGS + JMP sync∕atomic·AndInt64(SB) + +TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24 + GO_ARGS + JMP sync∕atomic·AndInt64(SB) + +// Or +TEXT sync∕atomic·OrInt32(SB), NOSPLIT, $0-20 + GO_ARGS + MOVD $__tsan_go_atomic32_fetch_or(SB), R9 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·OrInt64(SB), NOSPLIT, $0-24 + GO_ARGS + MOVD $__tsan_go_atomic64_fetch_or(SB), R9 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20 + GO_ARGS + JMP sync∕atomic·OrInt32(SB) + +TEXT sync∕atomic·OrUint64(SB), NOSPLIT, $0-24 + GO_ARGS + JMP sync∕atomic·OrInt64(SB) + +TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24 + GO_ARGS + JMP sync∕atomic·OrInt64(SB) + // CompareAndSwap TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17 GO_ARGS diff --git a/src/runtime/race_ppc64le.s b/src/runtime/race_ppc64le.s index 39cfffc39b..43829479bd 100644 --- a/src/runtime/race_ppc64le.s +++ b/src/runtime/race_ppc64le.s @@ -325,6 +325,52 @@ TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24 GO_ARGS BR sync∕atomic·AddInt64(SB) +// And +TEXT sync∕atomic·AndInt32(SB), NOSPLIT, $0-20 + GO_ARGS + MOVD $__tsan_go_atomic32_fetch_and(SB), R8 + BR racecallatomic<>(SB) + +TEXT sync∕atomic·AndInt64(SB), NOSPLIT, $0-24 + GO_ARGS + MOVD $__tsan_go_atomic64_fetch_and(SB), R8 + BR racecallatomic<>(SB) + +TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20 + GO_ARGS + BR sync∕atomic·AndInt32(SB) + +TEXT sync∕atomic·AndUint64(SB), NOSPLIT, $0-24 + GO_ARGS + BR sync∕atomic·AndInt64(SB) + +TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24 + GO_ARGS + BR sync∕atomic·AndInt64(SB) + +// Or +TEXT sync∕atomic·OrInt32(SB), NOSPLIT, $0-20 + GO_ARGS + MOVD $__tsan_go_atomic32_fetch_or(SB), R8 + BR racecallatomic<>(SB) + +TEXT sync∕atomic·OrInt64(SB), NOSPLIT, $0-24 + GO_ARGS + MOVD $__tsan_go_atomic64_fetch_or(SB), R8 + BR racecallatomic<>(SB) + +TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20 + GO_ARGS + BR sync∕atomic·OrInt32(SB) + +TEXT sync∕atomic·OrUint64(SB), NOSPLIT, $0-24 + GO_ARGS + BR sync∕atomic·OrInt64(SB) + +TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24 + GO_ARGS + BR sync∕atomic·OrInt64(SB) + // CompareAndSwap in tsan TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17 GO_ARGS diff --git a/src/runtime/race_s390x.s b/src/runtime/race_s390x.s index dadc12f4db..8e6a5d576a 100644 --- a/src/runtime/race_s390x.s +++ b/src/runtime/race_s390x.s @@ -274,6 +274,56 @@ TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24 
GO_ARGS JMP sync∕atomic·AddInt64(SB) +// And +TEXT sync∕atomic·AndInt32(SB), NOSPLIT, $0-20 + GO_ARGS + MOVD $__tsan_go_atomic32_fetch_and(SB), R1 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·AndInt64(SB), NOSPLIT, $0-24 + GO_ARGS + MOVD $__tsan_go_atomic64_fetch_and(SB), R1 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20 + GO_ARGS + JMP sync∕atomic·AndInt32(SB) + +TEXT sync∕atomic·AndUint64(SB), NOSPLIT, $0-24 + GO_ARGS + JMP sync∕atomic·AndInt64(SB) + +TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24 + GO_ARGS + JMP sync∕atomic·AndInt64(SB) + +// Or +TEXT sync∕atomic·OrInt32(SB), NOSPLIT, $0-20 + GO_ARGS + MOVD $__tsan_go_atomic32_fetch_or(SB), R1 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·OrInt64(SB), NOSPLIT, $0-24 + GO_ARGS + MOVD $__tsan_go_atomic64_fetch_or(SB), R1 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20 + GO_ARGS + JMP sync∕atomic·OrInt32(SB) + +TEXT sync∕atomic·OrUint64(SB), NOSPLIT, $0-24 + GO_ARGS + JMP sync∕atomic·OrInt64(SB) + +TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24 + GO_ARGS + JMP sync∕atomic·OrInt64(SB) + // CompareAndSwap TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17 diff --git a/src/sync/atomic/asm.s b/src/sync/atomic/asm.s index b9318fe8b7..c46869ede7 100644 --- a/src/sync/atomic/asm.s +++ b/src/sync/atomic/asm.s @@ -83,3 +83,33 @@ TEXT ·StoreUint64(SB),NOSPLIT,$0 TEXT ·StoreUintptr(SB),NOSPLIT,$0 JMP internal∕runtime∕atomic·Storeuintptr(SB) + +TEXT ·AndInt32(SB),NOSPLIT,$0 + JMP internal∕runtime∕atomic·And32(SB) + +TEXT ·AndUint32(SB),NOSPLIT,$0 + JMP internal∕runtime∕atomic·And32(SB) + +TEXT ·AndUintptr(SB),NOSPLIT,$0 + JMP internal∕runtime∕atomic·Anduintptr(SB) + +TEXT ·AndInt64(SB),NOSPLIT,$0 + JMP internal∕runtime∕atomic·And64(SB) + +TEXT ·AndUint64(SB),NOSPLIT,$0 + JMP internal∕runtime∕atomic·And64(SB) + +TEXT ·OrInt32(SB),NOSPLIT,$0 + JMP internal∕runtime∕atomic·Or32(SB) + +TEXT ·OrUint32(SB),NOSPLIT,$0 + JMP internal∕runtime∕atomic·Or32(SB) + +TEXT ·OrUintptr(SB),NOSPLIT,$0 + JMP internal∕runtime∕atomic·Oruintptr(SB) + +TEXT ·OrInt64(SB),NOSPLIT,$0 + JMP internal∕runtime∕atomic·Or64(SB) + +TEXT ·OrUint64(SB),NOSPLIT,$0 + JMP internal∕runtime∕atomic·Or64(SB) diff --git a/src/sync/atomic/atomic_test.go b/src/sync/atomic/atomic_test.go index c3604ef0af..0617b27aca 100644 --- a/src/sync/atomic/atomic_test.go +++ b/src/sync/atomic/atomic_test.go @@ -531,6 +531,472 @@ func TestAddUintptrMethod(t *testing.T) { } } +func TestAndInt32(t *testing.T) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + x.i = -1 + j := x.i + for mask := int32(1); mask != 0; mask <<= 1 { + old := x.i + k := AndInt32(&x.i, ^mask) + j &= ^mask + if x.i != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAndInt32Method(t *testing.T) { + var x struct { + before int32 + i Int32 + after int32 + } + x.before = magic32 + x.after = magic32 + x.i.Store(-1) + j := x.i.Load() + for mask := int32(1); mask != 0; mask <<= 1 { + old := x.i.Load() + k := x.i.And(^mask) + j &= ^mask + if x.i.Load() != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAndUint32(t 
*testing.T) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + x.i = 0xffffffff + j := x.i + for mask := uint32(1); mask != 0; mask <<= 1 { + old := x.i + k := AndUint32(&x.i, ^mask) + j &= ^mask + if x.i != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAndUint32Method(t *testing.T) { + var x struct { + before uint32 + i Uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + x.i.Store(0xffffffff) + j := x.i.Load() + for mask := uint32(1); mask != 0; mask <<= 1 { + old := x.i.Load() + k := x.i.And(^mask) + j &= ^mask + if x.i.Load() != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAndInt64(t *testing.T) { + var x struct { + before int64 + i int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + x.i = -1 + j := x.i + for mask := int64(1); mask != 0; mask <<= 1 { + old := x.i + k := AndInt64(&x.i, ^mask) + j &= ^mask + if x.i != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestAndInt64Method(t *testing.T) { + var x struct { + before int64 + i Int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + x.i.Store(-1) + j := x.i.Load() + for mask := int64(1); mask != 0; mask <<= 1 { + old := x.i.Load() + k := x.i.And(^mask) + j &= ^mask + if x.i.Load() != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestAndUint64(t *testing.T) { + var x struct { + before uint64 + i uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + x.i = 0xfffffffffffffff + j := x.i + for mask := uint64(1); mask != 0; mask <<= 1 { + old := x.i + k := AndUint64(&x.i, ^mask) + j &= ^mask + if x.i != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestAndUint64Method(t *testing.T) { + var x struct { + before uint64 + i Uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + x.i.Store(0xfffffffffffffff) + j := x.i.Load() + for mask := uint64(1); mask != 0; mask <<= 1 { + old := x.i.Load() + k := x.i.And(^mask) + j &= ^mask + if x.i.Load() != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestAndUintptr(t *testing.T) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + x.i = ^uintptr(0) + j := x.i + for mask := 
uintptr(1); mask != 0; mask <<= 1 { + old := x.i + k := AndUintptr(&x.i, ^mask) + j &= ^mask + if x.i != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestAndUintptrMethod(t *testing.T) { + var x struct { + before uintptr + i Uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + x.i.Store(^uintptr(0)) + j := x.i.Load() + for mask := uintptr(1); mask != 0; mask <<= 1 { + old := x.i.Load() + k := x.i.And(^mask) + j &= ^mask + if x.i.Load() != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestOrInt32(t *testing.T) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + var j int32 + for mask := int32(1); mask != 0; mask <<= 1 { + old := x.i + k := OrInt32(&x.i, mask) + j |= mask + if x.i != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestOrInt32Method(t *testing.T) { + var x struct { + before int32 + i Int32 + after int32 + } + x.before = magic32 + x.after = magic32 + var j int32 + for mask := int32(1); mask != 0; mask <<= 1 { + old := x.i.Load() + k := x.i.Or(mask) + j |= mask + if x.i.Load() != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestOrUint32(t *testing.T) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + var j uint32 + for mask := uint32(1); mask != 0; mask <<= 1 { + old := x.i + k := OrUint32(&x.i, mask) + j |= mask + if x.i != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestOrUint32Method(t *testing.T) { + var x struct { + before uint32 + i Uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + var j uint32 + for mask := uint32(1); mask != 0; mask <<= 1 { + old := x.i.Load() + k := x.i.Or(mask) + j |= mask + if x.i.Load() != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestOrInt64(t *testing.T) { + var x struct { + before int64 + i int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + var j int64 + for mask := int64(1); mask != 0; mask <<= 1 { + old := x.i + k := OrInt64(&x.i, mask) + j |= mask + if x.i != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestOrInt64Method(t 
*testing.T) { + var x struct { + before int64 + i Int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + var j int64 + for mask := int64(1); mask != 0; mask <<= 1 { + old := x.i.Load() + k := x.i.Or(mask) + j |= mask + if x.i.Load() != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestOrUint64(t *testing.T) { + var x struct { + before uint64 + i uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + var j uint64 + for mask := uint64(1); mask != 0; mask <<= 1 { + old := x.i + k := OrUint64(&x.i, mask) + j |= mask + if x.i != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestOrUint64Method(t *testing.T) { + var x struct { + before uint64 + i Uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + var j uint64 + for mask := uint64(1); mask != 0; mask <<= 1 { + old := x.i.Load() + k := x.i.Or(mask) + j |= mask + if x.i.Load() != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestOrUintptr(t *testing.T) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j uintptr + for mask := uintptr(1); mask != 0; mask <<= 1 { + old := x.i + k := OrUintptr(&x.i, mask) + j |= mask + if x.i != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestOrUintptrMethod(t *testing.T) { + var x struct { + before uintptr + i Uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j uintptr + for mask := uintptr(1); mask != 0; mask <<= 1 { + old := x.i.Load() + k := x.i.Or(mask) + j |= mask + if x.i.Load() != j || k != old { + t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + func TestCompareAndSwapInt32(t *testing.T) { var x struct { before int32 diff --git a/src/sync/atomic/doc.go b/src/sync/atomic/doc.go index c22d1159af..1f7f9b277e 100644 --- a/src/sync/atomic/doc.go +++ b/src/sync/atomic/doc.go @@ -139,6 +139,56 @@ func AddUint64(addr *uint64, delta uint64) (new uint64) // Consider using the more ergonomic and less error-prone [Uintptr.Add] instead. func AddUintptr(addr *uintptr, delta uintptr) (new uintptr) +// AndInt32 atomically performs a bitwise AND operation on *addr using the bitmask provided as mask +// and returns the old value. +// Consider using the more ergonomic and less error-prone [Int32.And] instead. 
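+// For example, AndInt32(&x, ^int32(7)) atomically clears the low three
+// bits of x and returns the value x held beforehand.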
+func AndInt32(addr *int32, mask int32) (old int32)
+
+// AndUint32 atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Uint32.And] instead.
+func AndUint32(addr *uint32, mask uint32) (old uint32)
+
+// AndInt64 atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Int64.And] instead.
+func AndInt64(addr *int64, mask int64) (old int64)
+
+// AndUint64 atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Uint64.And] instead.
+func AndUint64(addr *uint64, mask uint64) (old uint64)
+
+// AndUintptr atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Uintptr.And] instead.
+func AndUintptr(addr *uintptr, mask uintptr) (old uintptr)
+
+// OrInt32 atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Int32.Or] instead.
+func OrInt32(addr *int32, mask int32) (old int32)
+
+// OrUint32 atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Uint32.Or] instead.
+func OrUint32(addr *uint32, mask uint32) (old uint32)
+
+// OrInt64 atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Int64.Or] instead.
+func OrInt64(addr *int64, mask int64) (old int64)
+
+// OrUint64 atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Uint64.Or] instead.
+func OrUint64(addr *uint64, mask uint64) (old uint64)
+
+// OrUintptr atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Uintptr.Or] instead.
+func OrUintptr(addr *uintptr, mask uintptr) (old uintptr)
+
 // LoadInt32 atomically loads *addr.
 // Consider using the more ergonomic and less error-prone [Int32.Load] instead.
 func LoadInt32(addr *int32) (val int32)
diff --git a/src/sync/atomic/type.go b/src/sync/atomic/type.go
index 179fa93092..7d2b6805bc 100644
--- a/src/sync/atomic/type.go
+++ b/src/sync/atomic/type.go
@@ -87,6 +87,14 @@ func (x *Int32) CompareAndSwap(old, new int32) (swapped bool) {
 // Add atomically adds delta to x and returns the new value.
 func (x *Int32) Add(delta int32) (new int32) { return AddInt32(&x.v, delta) }
 
+// And atomically performs a bitwise AND operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Int32) And(mask int32) (old int32) { return AndInt32(&x.v, mask) }
+
+// Or atomically performs a bitwise OR operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Int32) Or(mask int32) (old int32) { return OrInt32(&x.v, mask) }
+
 // An Int64 is an atomic int64. The zero value is zero.
 type Int64 struct {
 	_ noCopy
@@ -111,6 +119,14 @@ func (x *Int64) CompareAndSwap(old, new int64) (swapped bool) {
 // Add atomically adds delta to x and returns the new value.
 func (x *Int64) Add(delta int64) (new int64) { return AddInt64(&x.v, delta) }
 
+// And atomically performs a bitwise AND operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Int64) And(mask int64) (old int64) { return AndInt64(&x.v, mask) }
+
+// Or atomically performs a bitwise OR operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Int64) Or(mask int64) (old int64) { return OrInt64(&x.v, mask) }
+
 // A Uint32 is an atomic uint32. The zero value is zero.
 type Uint32 struct {
 	_ noCopy
@@ -134,6 +150,14 @@ func (x *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
 // Add atomically adds delta to x and returns the new value.
 func (x *Uint32) Add(delta uint32) (new uint32) { return AddUint32(&x.v, delta) }
 
+// And atomically performs a bitwise AND operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Uint32) And(mask uint32) (old uint32) { return AndUint32(&x.v, mask) }
+
+// Or atomically performs a bitwise OR operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Uint32) Or(mask uint32) (old uint32) { return OrUint32(&x.v, mask) }
+
 // A Uint64 is an atomic uint64. The zero value is zero.
 type Uint64 struct {
 	_ noCopy
@@ -158,6 +182,14 @@ func (x *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
 // Add atomically adds delta to x and returns the new value.
 func (x *Uint64) Add(delta uint64) (new uint64) { return AddUint64(&x.v, delta) }
 
+// And atomically performs a bitwise AND operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Uint64) And(mask uint64) (old uint64) { return AndUint64(&x.v, mask) }
+
+// Or atomically performs a bitwise OR operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Uint64) Or(mask uint64) (old uint64) { return OrUint64(&x.v, mask) }
+
 // A Uintptr is an atomic uintptr. The zero value is zero.
 type Uintptr struct {
 	_ noCopy
@@ -181,6 +213,14 @@ func (x *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
 // Add atomically adds delta to x and returns the new value.
 func (x *Uintptr) Add(delta uintptr) (new uintptr) { return AddUintptr(&x.v, delta) }
 
+// And atomically performs a bitwise AND operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Uintptr) And(mask uintptr) (old uintptr) { return AndUintptr(&x.v, mask) }
+
+// Or atomically performs a bitwise OR operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Uintptr) Or(mask uintptr) (old uintptr) { return OrUintptr(&x.v, mask) }
+
 // noCopy may be added to structs which must not be copied
 // after the first use.
 //
-- 
2.48.1
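As a quick check of the race instrumentation, a minimal sketch, assuming the patch is applied to a race-enabled toolchain and the program is run with go run -race: each Or call below is routed through the new __tsan_go_atomic64_fetch_or entry point, so the detector treats the accesses as atomic and reports no race, while the final Load observes every bit.

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	func main() {
		var bits atomic.Uint64
		var wg sync.WaitGroup
		for i := 0; i < 64; i++ {
			wg.Add(1)
			go func(i int) {
				defer wg.Done()
				bits.Or(1 << i) // each goroutine atomically sets one distinct bit
			}(i)
		}
		wg.Wait()
		fmt.Printf("%#x\n", bits.Load()) // 0xffffffffffffffff
	}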