--- /dev/null
+pkg sync/atomic, func AndInt32(*int32, int32) int32 #61395
+pkg sync/atomic, func AndInt64(*int64, int64) int64 #61395
+pkg sync/atomic, func AndUint32(*uint32, uint32) uint32 #61395
+pkg sync/atomic, func AndUint64(*uint64, uint64) uint64 #61395
+pkg sync/atomic, func AndUintptr(*uintptr, uintptr) uintptr #61395
+pkg sync/atomic, func OrInt32(*int32, int32) int32 #61395
+pkg sync/atomic, func OrInt64(*int64, int64) int64 #61395
+pkg sync/atomic, func OrUint32(*uint32, uint32) uint32 #61395
+pkg sync/atomic, func OrUint64(*uint64, uint64) uint64 #61395
+pkg sync/atomic, func OrUintptr(*uintptr, uintptr) uintptr #61395
+pkg sync/atomic, method (*Int32) And(int32) int32 #61395
+pkg sync/atomic, method (*Int64) And(int64) int64 #61395
+pkg sync/atomic, method (*Uint32) And(uint32) uint32 #61395
+pkg sync/atomic, method (*Uint64) And(uint64) uint64 #61395
+pkg sync/atomic, method (*Uintptr) And(uintptr) uintptr #61395
+pkg sync/atomic, method (*Int32) Or(int32) int32 #61395
+pkg sync/atomic, method (*Int64) Or(int64) int64 #61395
+pkg sync/atomic, method (*Uint32) Or(uint32) uint32 #61395
+pkg sync/atomic, method (*Uint64) Or(uint64) uint64 #61395
+pkg sync/atomic, method (*Uintptr) Or(uintptr) uintptr #61395
--- /dev/null
+<!-- Issue #61395 -->
+The new [`And`](/pkg/sync/atomic#AndInt32) and [`Or`](/pkg/sync/atomic#OrInt32)
+operations atomically apply a bitwise `AND` or `OR` to a value in memory and return the old value.
+They are provided both as standalone functions (`AndInt32`, `OrUint64`, and so on) and as methods
+on the atomic types, such as [`Uint32.And`](/pkg/sync/atomic#Uint32.And) and [`Uint64.Or`](/pkg/sync/atomic#Uint64.Or).
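
As a quick illustrative sketch of the new API (the `flags`, `ready`, and `raw` names below are invented for this example and are not part of the change), setting and clearing a bit while observing the returned old value looks like this:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

const ready uint32 = 1 << 3 // example bit chosen for the sketch

func main() {
	var flags atomic.Uint32

	old := flags.Or(ready)         // atomically set the bit; returns the previous value
	fmt.Println(old, flags.Load()) // 0 8

	old = flags.And(^ready)        // atomically clear the bit; again returns the old value
	fmt.Println(old, flags.Load()) // 8 0

	// The standalone functions operate on ordinary integer variables.
	var raw uint32
	old = atomic.OrUint32(&raw, ready)
	fmt.Println(old, raw) // 0 8
}
```
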
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
+//go:cgo_import_static __tsan_go_atomic32_fetch_and
+//go:cgo_import_static __tsan_go_atomic64_fetch_and
+//go:cgo_import_static __tsan_go_atomic32_fetch_or
+//go:cgo_import_static __tsan_go_atomic64_fetch_or
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange
//go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
+//go:linkname abigen_sync_atomic_AndInt32 sync/atomic.AndInt32
+func abigen_sync_atomic_AndInt32(addr *int32, mask int32) (old int32)
+
+//go:linkname abigen_sync_atomic_AndUint32 sync/atomic.AndUint32
+func abigen_sync_atomic_AndUint32(addr *uint32, mask uint32) (old uint32)
+
+//go:linkname abigen_sync_atomic_AndInt64 sync/atomic.AndInt64
+func abigen_sync_atomic_AndInt64(addr *int64, mask int64) (old int64)
+
+//go:linkname abigen_sync_atomic_AndUint64 sync/atomic.AndUint64
+func abigen_sync_atomic_AndUint64(addr *uint64, mask uint64) (old uint64)
+
+//go:linkname abigen_sync_atomic_AndUintptr sync/atomic.AndUintptr
+func abigen_sync_atomic_AndUintptr(addr *uintptr, mask uintptr) (old uintptr)
+
+//go:linkname abigen_sync_atomic_OrInt32 sync/atomic.OrInt32
+func abigen_sync_atomic_OrInt32(addr *int32, mask int32) (old int32)
+
+//go:linkname abigen_sync_atomic_OrUint32 sync/atomic.OrUint32
+func abigen_sync_atomic_OrUint32(addr *uint32, mask uint32) (old uint32)
+
+//go:linkname abigen_sync_atomic_OrInt64 sync/atomic.OrInt64
+func abigen_sync_atomic_OrInt64(addr *int64, mask int64) (old int64)
+
+//go:linkname abigen_sync_atomic_OrUint64 sync/atomic.OrUint64
+func abigen_sync_atomic_OrUint64(addr *uint64, mask uint64) (old uint64)
+
+//go:linkname abigen_sync_atomic_OrUintptr sync/atomic.OrUintptr
+func abigen_sync_atomic_OrUintptr(addr *uintptr, mask uintptr) (old uintptr)
+
//go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
GO_ARGS
JMP sync∕atomic·AddInt64(SB)
+// And
+TEXT sync∕atomic·AndInt32(SB), NOSPLIT|NOFRAME, $0-20
+ GO_ARGS
+ MOVQ $__tsan_go_atomic32_fetch_and(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·AndInt64(SB), NOSPLIT|NOFRAME, $0-24
+ GO_ARGS
+ MOVQ $__tsan_go_atomic64_fetch_and(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·AndInt32(SB)
+
+TEXT sync∕atomic·AndUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AndInt64(SB)
+
+TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AndInt64(SB)
+
+// Or
+TEXT sync∕atomic·OrInt32(SB), NOSPLIT|NOFRAME, $0-20
+ GO_ARGS
+ MOVQ $__tsan_go_atomic32_fetch_or(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·OrInt64(SB), NOSPLIT|NOFRAME, $0-24
+ GO_ARGS
+ MOVQ $__tsan_go_atomic64_fetch_or(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·OrInt32(SB)
+
+TEXT sync∕atomic·OrUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·OrInt64(SB)
+
+TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·OrInt64(SB)
+
// CompareAndSwap
TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT|NOFRAME, $0-17
GO_ARGS
GO_ARGS
JMP sync∕atomic·AddInt64(SB)
+// And
+TEXT sync∕atomic·AndInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_and(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·AndInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_and(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·AndInt32(SB)
+
+TEXT sync∕atomic·AndUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AndInt64(SB)
+
+TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AndInt64(SB)
+
+// Or
+TEXT sync∕atomic·OrInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_or(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·OrInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_or(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·OrInt32(SB)
+
+TEXT sync∕atomic·OrUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·OrInt64(SB)
+
+TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·OrInt64(SB)
+
// CompareAndSwap
TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
GO_ARGS
GO_ARGS
BR sync∕atomic·AddInt64(SB)
+// And in tsan
+TEXT sync∕atomic·AndInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_and(SB), R8
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·AndInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_and(SB), R8
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ BR sync∕atomic·AndInt32(SB)
+
+TEXT sync∕atomic·AndUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ BR sync∕atomic·AndInt64(SB)
+
+TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ BR sync∕atomic·AndInt64(SB)
+
+// Or in tsan
+TEXT sync∕atomic·OrInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_or(SB), R8
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·OrInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_or(SB), R8
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ BR sync∕atomic·OrInt32(SB)
+
+TEXT sync∕atomic·OrUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ BR sync∕atomic·OrInt64(SB)
+
+TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ BR sync∕atomic·OrInt64(SB)
+
// CompareAndSwap in tsan
TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
GO_ARGS
GO_ARGS
JMP sync∕atomic·AddInt64(SB)
+// And
+TEXT sync∕atomic·AndInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_and(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·AndInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_and(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·AndInt32(SB)
+
+TEXT sync∕atomic·AndUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AndInt64(SB)
+
+TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AndInt64(SB)
+
+// Or
+TEXT sync∕atomic·OrInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_or(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·OrInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_or(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·OrInt32(SB)
+
+TEXT sync∕atomic·OrUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·OrInt64(SB)
+
+TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·OrInt64(SB)
+
// CompareAndSwap
TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
TEXT ·StoreUintptr(SB),NOSPLIT,$0
JMP internal∕runtime∕atomic·Storeuintptr(SB)
+
+TEXT ·AndInt32(SB),NOSPLIT,$0
+ JMP internal∕runtime∕atomic·And32(SB)
+
+TEXT ·AndUint32(SB),NOSPLIT,$0
+ JMP internal∕runtime∕atomic·And32(SB)
+
+TEXT ·AndUintptr(SB),NOSPLIT,$0
+ JMP internal∕runtime∕atomic·Anduintptr(SB)
+
+TEXT ·AndInt64(SB),NOSPLIT,$0
+ JMP internal∕runtime∕atomic·And64(SB)
+
+TEXT ·AndUint64(SB),NOSPLIT,$0
+ JMP internal∕runtime∕atomic·And64(SB)
+
+TEXT ·OrInt32(SB),NOSPLIT,$0
+ JMP internal∕runtime∕atomic·Or32(SB)
+
+TEXT ·OrUint32(SB),NOSPLIT,$0
+ JMP internal∕runtime∕atomic·Or32(SB)
+
+TEXT ·OrUintptr(SB),NOSPLIT,$0
+ JMP internal∕runtime∕atomic·Oruintptr(SB)
+
+TEXT ·OrInt64(SB),NOSPLIT,$0
+ JMP internal∕runtime∕atomic·Or64(SB)
+
+TEXT ·OrUint64(SB),NOSPLIT,$0
+ JMP internal∕runtime∕atomic·Or64(SB)
}
}
+func TestAndInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ x.i = -1
+ j := x.i
+ for mask := int32(1); mask != 0; mask <<= 1 {
+ old := x.i
+ k := AndInt32(&x.i, ^mask)
+ j &= ^mask
+ if x.i != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAndInt32Method(t *testing.T) {
+ var x struct {
+ before int32
+ i Int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ x.i.Store(-1)
+ j := x.i.Load()
+ for mask := int32(1); mask != 0; mask <<= 1 {
+ old := x.i.Load()
+ k := x.i.And(^mask)
+ j &= ^mask
+ if x.i.Load() != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAndUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ x.i = 0xffffffff
+ j := x.i
+ for mask := uint32(1); mask != 0; mask <<= 1 {
+ old := x.i
+ k := AndUint32(&x.i, ^mask)
+ j &= ^mask
+ if x.i != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAndUint32Method(t *testing.T) {
+ var x struct {
+ before uint32
+ i Uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ x.i.Store(0xffffffff)
+ j := x.i.Load()
+ for mask := uint32(1); mask != 0; mask <<= 1 {
+ old := x.i.Load()
+ k := x.i.And(^mask)
+ j &= ^mask
+ if x.i.Load() != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAndInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ x.i = -1
+ j := x.i
+ for mask := int64(1); mask != 0; mask <<= 1 {
+ old := x.i
+ k := AndInt64(&x.i, ^mask)
+ j &= ^mask
+ if x.i != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestAndInt64Method(t *testing.T) {
+ var x struct {
+ before int64
+ i Int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ x.i.Store(-1)
+ j := x.i.Load()
+ for mask := int64(1); mask != 0; mask <<= 1 {
+ old := x.i.Load()
+ k := x.i.And(^mask)
+ j &= ^mask
+ if x.i.Load() != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestAndUint64(t *testing.T) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+	x.i = 0xffffffffffffffff
+ j := x.i
+ for mask := uint64(1); mask != 0; mask <<= 1 {
+ old := x.i
+ k := AndUint64(&x.i, ^mask)
+ j &= ^mask
+ if x.i != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestAndUint64Method(t *testing.T) {
+ var x struct {
+ before uint64
+ i Uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+	x.i.Store(0xffffffffffffffff)
+ j := x.i.Load()
+ for mask := uint64(1); mask != 0; mask <<= 1 {
+ old := x.i.Load()
+ k := x.i.And(^mask)
+ j &= ^mask
+ if x.i.Load() != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestAndUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ x.i = ^uintptr(0)
+ j := x.i
+ for mask := uintptr(1); mask != 0; mask <<= 1 {
+ old := x.i
+ k := AndUintptr(&x.i, ^mask)
+ j &= ^mask
+ if x.i != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestAndUintptrMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ x.i.Store(^uintptr(0))
+ j := x.i.Load()
+ for mask := uintptr(1); mask != 0; mask <<= 1 {
+ old := x.i.Load()
+ k := x.i.And(^mask)
+ j &= ^mask
+ if x.i.Load() != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestOrInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j int32
+ for mask := int32(1); mask != 0; mask <<= 1 {
+ old := x.i
+ k := OrInt32(&x.i, mask)
+ j |= mask
+ if x.i != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestOrInt32Method(t *testing.T) {
+ var x struct {
+ before int32
+ i Int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j int32
+ for mask := int32(1); mask != 0; mask <<= 1 {
+ old := x.i.Load()
+ k := x.i.Or(mask)
+ j |= mask
+ if x.i.Load() != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestOrUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j uint32
+ for mask := uint32(1); mask != 0; mask <<= 1 {
+ old := x.i
+ k := OrUint32(&x.i, mask)
+ j |= mask
+ if x.i != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestOrUint32Method(t *testing.T) {
+ var x struct {
+ before uint32
+ i Uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j uint32
+ for mask := uint32(1); mask != 0; mask <<= 1 {
+ old := x.i.Load()
+ k := x.i.Or(mask)
+ j |= mask
+ if x.i.Load() != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestOrInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j int64
+ for mask := int64(1); mask != 0; mask <<= 1 {
+ old := x.i
+ k := OrInt64(&x.i, mask)
+ j |= mask
+ if x.i != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestOrInt64Method(t *testing.T) {
+ var x struct {
+ before int64
+ i Int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j int64
+ for mask := int64(1); mask != 0; mask <<= 1 {
+ old := x.i.Load()
+ k := x.i.Or(mask)
+ j |= mask
+ if x.i.Load() != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestOrUint64(t *testing.T) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j uint64
+ for mask := uint64(1); mask != 0; mask <<= 1 {
+ old := x.i
+ k := OrUint64(&x.i, mask)
+ j |= mask
+ if x.i != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestOrUint64Method(t *testing.T) {
+ var x struct {
+ before uint64
+ i Uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j uint64
+ for mask := uint64(1); mask != 0; mask <<= 1 {
+ old := x.i.Load()
+ k := x.i.Or(mask)
+ j |= mask
+ if x.i.Load() != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestOrUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j uintptr
+ for mask := uintptr(1); mask != 0; mask <<= 1 {
+ old := x.i
+ k := OrUintptr(&x.i, mask)
+ j |= mask
+ if x.i != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestOrUintptrMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j uintptr
+ for mask := uintptr(1); mask != 0; mask <<= 1 {
+ old := x.i.Load()
+ k := x.i.Or(mask)
+ j |= mask
+ if x.i.Load() != j || k != old {
+ t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i.Load(), j, k, old)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
func TestCompareAndSwapInt32(t *testing.T) {
var x struct {
before int32
// Consider using the more ergonomic and less error-prone [Uintptr.Add] instead.
func AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
+// AndInt32 atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Int32.And] instead.
+func AndInt32(addr *int32, mask int32) (old int32)
+
+// AndUint32 atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Uint32.And] instead.
+func AndUint32(addr *uint32, mask uint32) (old uint32)
+
+// AndInt64 atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Int64.And] instead.
+func AndInt64(addr *int64, mask int64) (old int64)
+
+// AndUint64 atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Uint64.And] instead.
+func AndUint64(addr *uint64, mask uint64) (old uint64)
+
+// AndUintptr atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Uintptr.And] instead.
+func AndUintptr(addr *uintptr, mask uintptr) (old uintptr)
+
+// OrInt32 atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Int32.Or] instead.
+func OrInt32(addr *int32, mask int32) (old int32)
+
+// OrUint32 atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Uint32.Or] instead.
+func OrUint32(addr *uint32, mask uint32) (old uint32)
+
+// OrInt64 atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Int64.Or] instead.
+func OrInt64(addr *int64, mask int64) (old int64)
+
+// OrUint64 atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Uint64.Or] instead.
+func OrUint64(addr *uint64, mask uint64) (old uint64)
+
+// OrUintptr atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
+// and returns the old value.
+// Consider using the more ergonomic and less error-prone [Uintptr.Or] instead.
+func OrUintptr(addr *uintptr, mask uintptr) (old uintptr)
+
// LoadInt32 atomically loads *addr.
// Consider using the more ergonomic and less error-prone [Int32.Load] instead.
func LoadInt32(addr *int32) (val int32)
// Add atomically adds delta to x and returns the new value.
func (x *Int32) Add(delta int32) (new int32) { return AddInt32(&x.v, delta) }
+// And atomically performs a bitwise AND operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Int32) And(mask int32) (old int32) { return AndInt32(&x.v, mask) }
+
+// Or atomically performs a bitwise OR operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Int32) Or(mask int32) (old int32) { return OrInt32(&x.v, mask) }
+
// An Int64 is an atomic int64. The zero value is zero.
type Int64 struct {
_ noCopy
// Add atomically adds delta to x and returns the new value.
func (x *Int64) Add(delta int64) (new int64) { return AddInt64(&x.v, delta) }
+// And atomically performs a bitwise AND operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Int64) And(mask int64) (old int64) { return AndInt64(&x.v, mask) }
+
+// Or atomically performs a bitwise OR operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Int64) Or(mask int64) (old int64) { return OrInt64(&x.v, mask) }
+
// A Uint32 is an atomic uint32. The zero value is zero.
type Uint32 struct {
_ noCopy
// Add atomically adds delta to x and returns the new value.
func (x *Uint32) Add(delta uint32) (new uint32) { return AddUint32(&x.v, delta) }
+// And atomically performs a bitwise AND operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Uint32) And(mask uint32) (old uint32) { return AndUint32(&x.v, mask) }
+
+// Or atomically performs a bitwise OR operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Uint32) Or(mask uint32) (old uint32) { return OrUint32(&x.v, mask) }
+
// A Uint64 is an atomic uint64. The zero value is zero.
type Uint64 struct {
_ noCopy
// Add atomically adds delta to x and returns the new value.
func (x *Uint64) Add(delta uint64) (new uint64) { return AddUint64(&x.v, delta) }
+// And atomically performs a bitwise AND operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Uint64) And(mask uint64) (old uint64) { return AndUint64(&x.v, mask) }
+
+// Or atomically performs a bitwise OR operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Uint64) Or(mask uint64) (old uint64) { return OrUint64(&x.v, mask) }
+
// A Uintptr is an atomic uintptr. The zero value is zero.
type Uintptr struct {
_ noCopy
// Add atomically adds delta to x and returns the new value.
func (x *Uintptr) Add(delta uintptr) (new uintptr) { return AddUintptr(&x.v, delta) }
+// And atomically performs a bitwise AND operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Uintptr) And(mask uintptr) (old uintptr) { return AndUintptr(&x.v, mask) }
+
+// Or atomically performs a bitwise OR operation on x using the bitmask
+// provided as mask and returns the old value.
+func (x *Uintptr) Or(mask uintptr) (old uintptr) { return OrUintptr(&x.v, mask) }
+
// noCopy may be added to structs which must not be copied
// after the first use.
//