void __tsan_acquire(void);
void __tsan_release(void);
void __tsan_release_merge(void);
+void __tsan_go_ignore_sync_begin(void);
+void __tsan_go_ignore_sync_end(void);
// Mimic what cmd/cgo would do.
#pragma cgo_import_static __tsan_init
#pragma cgo_import_static __tsan_acquire
#pragma cgo_import_static __tsan_release
#pragma cgo_import_static __tsan_release_merge
+#pragma cgo_import_static __tsan_go_ignore_sync_begin
+#pragma cgo_import_static __tsan_go_ignore_sync_end
// These are called from race_amd64.s.
#pragma cgo_import_static __tsan_read
#pragma cgo_import_static __tsan_func_enter
#pragma cgo_import_static __tsan_func_exit
+#pragma cgo_import_static __tsan_go_atomic32_load
+#pragma cgo_import_static __tsan_go_atomic64_load
+#pragma cgo_import_static __tsan_go_atomic32_store
+#pragma cgo_import_static __tsan_go_atomic64_store
+#pragma cgo_import_static __tsan_go_atomic32_exchange
+#pragma cgo_import_static __tsan_go_atomic64_exchange
+#pragma cgo_import_static __tsan_go_atomic32_fetch_add
+#pragma cgo_import_static __tsan_go_atomic64_fetch_add
+#pragma cgo_import_static __tsan_go_atomic32_compare_exchange
+#pragma cgo_import_static __tsan_go_atomic64_compare_exchange
+
extern byte runtime·noptrdata[];
extern byte runtime·enoptrbss[];
runtime·racereleasemerge(addr);
}
-// func RaceSemacquire(s *uint32)
-void
-runtime·RaceSemacquire(uint32 *s)
-{
- runtime·semacquire(s, false);
-}
-
-// func RaceSemrelease(s *uint32)
-void
-runtime·RaceSemrelease(uint32 *s)
-{
- runtime·semrelease(s);
-}
-
// func RaceDisable()
void
runtime·RaceDisable(void)
{
- g->raceignore++;
+ if(g->raceignore++ == 0)
+ runtime·racecall(__tsan_go_ignore_sync_begin, g->racectx);
}
// func RaceEnable()
void
runtime·RaceEnable(void)
{
- g->raceignore--;
+ if(--g->raceignore == 0)
+ runtime·racecall(__tsan_go_ignore_sync_end, g->racectx);
}
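
The reference counting above means only the outermost pair talks to the race runtime: the 0->1 transition issues __tsan_go_ignore_sync_begin and the matching 1->0 transition issues __tsan_go_ignore_sync_end. A minimal Go sketch of the resulting nesting semantics, assuming a program built with -race (runtime.RaceDisable and runtime.RaceEnable exist only under that build tag):

	package main

	import "runtime"

	func main() {
		runtime.RaceDisable() // 0->1: race runtime starts ignoring this goroutine
		runtime.RaceDisable() // 1->2: counter only, no racecall
		// accesses and sync operations here produce no race reports
		runtime.RaceEnable()  // 2->1: still ignoring
		runtime.RaceEnable()  // 1->0: race runtime resumes checking
	}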
typedef struct SymbolizeContext SymbolizeContext;
Tested with gcc 4.6.1 and 4.7.0. On Windows it's built with 64-bit MinGW.
-Current runtime is built on rev 210365.
+Current runtime is built on rev 215000.
x = 1
}
-// Races with non-atomic loads are not detected.
-func TestRaceFailingAtomicStoreLoad(t *testing.T) {
+func TestRaceAtomicStoreLoad(t *testing.T) {
c := make(chan bool)
var a uint64
go func() {
<-c
}
-// Races with non-atomic loads are not detected.
-func TestRaceFailingAtomicAddLoad(t *testing.T) {
+func TestRaceAtomicAddLoad(t *testing.T) {
c := make(chan bool)
var a uint64
go func() {
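
With the atomic entry points now intercepted directly by tsan, races between an atomic operation and a plain load are detected, which is why the Failing prefix is dropped from these tests. The bodies above are truncated; a hedged sketch of the full shape of such a test (the name TestRaceAtomicAddLoadSketch and the exact body are illustrative):

	package race_test

	import (
		"sync/atomic"
		"testing"
	)

	func TestRaceAtomicAddLoadSketch(t *testing.T) {
		c := make(chan bool)
		var a uint64
		go func() {
			atomic.AddUint64(&a, 1) // atomic add in one goroutine...
			c <- true
		}()
		_ = a // ...races with this plain load in the other
		<-c
	}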
TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
MOVQ addr+0(FP), RARG1
MOVQ callpc+8(FP), RARG2
- MOVQ cp+16(FP), RARG3
+ MOVQ pc+16(FP), RARG3
// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
MOVQ $__tsan_write_pc(SB), AX
JMP racecalladdr<>(SB)
MOVQ $__tsan_func_exit(SB), AX
JMP racecall<>(SB)
+// Atomic operations for the sync/atomic package.
+
+// Load
+TEXT sync∕atomic·LoadInt32(SB), NOSPLIT, $0-0
+ MOVQ $__tsan_go_atomic32_load(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·LoadInt64(SB), NOSPLIT, $0-0
+ MOVQ $__tsan_go_atomic64_load(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·LoadUint32(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·LoadInt32(SB)
+
+TEXT sync∕atomic·LoadUint64(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·LoadInt64(SB)
+
+TEXT sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·LoadInt64(SB)
+
+TEXT sync∕atomic·LoadPointer(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·LoadInt64(SB)
+
+// Store
+TEXT sync∕atomic·StoreInt32(SB), NOSPLIT, $0-0
+ MOVQ $__tsan_go_atomic32_store(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·StoreInt64(SB), NOSPLIT, $0-0
+ MOVQ $__tsan_go_atomic64_store(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·StoreUint32(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·StoreInt32(SB)
+
+TEXT sync∕atomic·StoreUint64(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·StoreInt64(SB)
+
+TEXT sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·StoreInt64(SB)
+
+TEXT sync∕atomic·StorePointer(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·StoreInt64(SB)
+
+// Swap
+TEXT sync∕atomic·SwapInt32(SB), NOSPLIT, $0-0
+ MOVQ $__tsan_go_atomic32_exchange(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·SwapInt64(SB), NOSPLIT, $0-0
+ MOVQ $__tsan_go_atomic64_exchange(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·SwapUint32(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·SwapInt32(SB)
+
+TEXT sync∕atomic·SwapUint64(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·SwapInt64(SB)
+
+TEXT sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·SwapInt64(SB)
+
+TEXT sync∕atomic·SwapPointer(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·SwapInt64(SB)
+
+// Add
+TEXT sync∕atomic·AddInt32(SB), NOSPLIT, $0-0
+ MOVQ $__tsan_go_atomic32_fetch_add(SB), AX
+ CALL racecallatomic<>(SB)
+ MOVL add+8(FP), AX // convert fetch_add to add_fetch
+ ADDL AX, ret+16(FP)
+ RET
+
+TEXT sync∕atomic·AddInt64(SB), NOSPLIT, $0-0
+ MOVQ $__tsan_go_atomic64_fetch_add(SB), AX
+ CALL racecallatomic<>(SB)
+ MOVQ add+8(FP), AX // convert fetch_add to add_fetch
+ ADDQ AX, ret+16(FP)
+ RET
+
+TEXT sync∕atomic·AddUint32(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·AddInt32(SB)
+
+TEXT sync∕atomic·AddUint64(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·AddInt64(SB)
+
+TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·AddInt64(SB)
+
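The __tsan_go_atomic*_fetch_add entry points leave the old value in the Go return slot, while sync/atomic's Add functions must return the new value, so the stubs add the delta to the slot after the call (the ADDL/ADDQ above). A runnable Go model of that conversion, with an illustrative fetchAdd standing in for the tsan call:

	package main

	import "fmt"

	// fetchAdd models what the tsan fetch_add entry point leaves in the
	// return slot: the value held before the add.
	func fetchAdd(addr *int64, delta int64) (old int64) {
		old = *addr
		*addr += delta
		return old
	}

	func main() {
		x := int64(5)
		ret := fetchAdd(&x, 3) // old value: 5
		ret += 3               // the stub's ADDQ AX, ret+16(FP)
		fmt.Println(ret == x)  // true: Add now reports the new value, 8
	}
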
+// CompareAndSwap
+TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-0
+ MOVQ $__tsan_go_atomic32_compare_exchange(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-0
+ MOVQ $__tsan_go_atomic64_compare_exchange(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·CompareAndSwapInt32(SB)
+
+TEXT sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·CompareAndSwapInt64(SB)
+
+TEXT sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·CompareAndSwapInt64(SB)
+
+TEXT sync∕atomic·CompareAndSwapPointer(SB), NOSPLIT, $0-0
+ JMP sync∕atomic·CompareAndSwapInt64(SB)
+
+// Generic atomic operation implementation.
+// AX already contains the target function.
+TEXT racecallatomic<>(SB), NOSPLIT, $0-0
+ // Touch the target address so a bad pointer faults here (SIGSEGV),
+ // before we enter the race runtime.
+ MOVQ 16(SP), R12
+ MOVL (R12), R12
+ get_tls(R12)
+ MOVQ g(R12), R14
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ MOVQ 8(SP), RARG1 // caller pc
+ MOVQ (SP), RARG2 // pc
+ LEAQ 16(SP), RARG3 // arguments
+ JMP racecall<>(SB)
+
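Under this scheme the race runtime performs the memory operation itself: the trampoline hands tsan the goroutine's race context, two program counters, and a pointer to the stub's argument block at 16(SP), and tsan reads the operands and writes the result back through that block. A hedged Go model of the data flow for a 32-bit load; the struct and function below are illustrative, not the real interface:

	package main

	import "fmt"

	// loadArgs mirrors the stub's frame starting at 16(SP): the arguments,
	// followed by the return slot.
	type loadArgs struct {
		addr *int32 // argument
		ret  int32  // return value slot
	}

	// tsanGoAtomic32Load models only the data movement of
	// __tsan_go_atomic32_load; the real entry point also records the
	// access for race detection.
	func tsanGoAtomic32Load(args *loadArgs) {
		args.ret = *args.addr
	}

	func main() {
		x := int32(42)
		a := loadArgs{addr: &x}
		tsanGoAtomic32Load(&a)
		fmt.Println(a.ret) // 42
	}
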
// void runtime·racecall(void(*f)(...), ...)
// Calls C function f from race runtime and passes up to 4 arguments to it.
// The arguments are never heap-object-preserving pointers, so we pretend there are no arguments.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !race
-
// Package atomic provides low-level atomic memory primitives
// useful for implementing synchronization algorithms.
//
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build race
-
-package atomic
-
-import (
- "runtime"
- "unsafe"
-)
-
-// We use runtime.RaceRead() inside of atomic operations to catch races
-// between atomic and non-atomic operations. It will also catch races
-// between Mutex.Lock() and mutex overwrite (mu = Mutex{}). Since we use
-// only RaceRead() we won't catch races with non-atomic loads.
-// Otherwise (if we use RaceWrite()) we will report races
-// between atomic operations (false positives).
-
-var mtx uint32 = 1 // same for all
-
-func SwapInt32(addr *int32, new int32) (old int32) {
- return int32(SwapUint32((*uint32)(unsafe.Pointer(addr)), uint32(new)))
-}
-
-func SwapUint32(addr *uint32, new uint32) (old uint32) {
- _ = *addr
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(addr))
- runtime.RaceAcquire(unsafe.Pointer(addr))
- old = *addr
- *addr = new
- runtime.RaceReleaseMerge(unsafe.Pointer(addr))
- runtime.RaceSemrelease(&mtx)
- return
-}
-
-func SwapInt64(addr *int64, new int64) (old int64) {
- return int64(SwapUint64((*uint64)(unsafe.Pointer(addr)), uint64(new)))
-}
-
-func SwapUint64(addr *uint64, new uint64) (old uint64) {
- _ = *addr
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(addr))
- runtime.RaceAcquire(unsafe.Pointer(addr))
- old = *addr
- *addr = new
- runtime.RaceReleaseMerge(unsafe.Pointer(addr))
- runtime.RaceSemrelease(&mtx)
- return
-}
-
-func SwapUintptr(addr *uintptr, new uintptr) (old uintptr) {
- return uintptr(SwapPointer((*unsafe.Pointer)(unsafe.Pointer(addr)), unsafe.Pointer(new)))
-}
-
-func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer) {
- _ = *addr
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(addr))
- runtime.RaceAcquire(unsafe.Pointer(addr))
- old = *addr
- *addr = new
- runtime.RaceReleaseMerge(unsafe.Pointer(addr))
- runtime.RaceSemrelease(&mtx)
- return
-}
-
-func CompareAndSwapInt32(val *int32, old, new int32) bool {
- return CompareAndSwapUint32((*uint32)(unsafe.Pointer(val)), uint32(old), uint32(new))
-}
-
-func CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool) {
- _ = *val
- swapped = false
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(val))
- runtime.RaceAcquire(unsafe.Pointer(val))
- if *val == old {
- *val = new
- swapped = true
- runtime.RaceReleaseMerge(unsafe.Pointer(val))
- }
- runtime.RaceSemrelease(&mtx)
- return
-}
-
-func CompareAndSwapInt64(val *int64, old, new int64) bool {
- return CompareAndSwapUint64((*uint64)(unsafe.Pointer(val)), uint64(old), uint64(new))
-}
-
-func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool) {
- _ = *val
- swapped = false
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(val))
- runtime.RaceAcquire(unsafe.Pointer(val))
- if *val == old {
- *val = new
- swapped = true
- runtime.RaceReleaseMerge(unsafe.Pointer(val))
- }
- runtime.RaceSemrelease(&mtx)
- return
-}
-
-func CompareAndSwapPointer(val *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool) {
- _ = *val
- swapped = false
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(val))
- runtime.RaceAcquire(unsafe.Pointer(val))
- if *val == old {
- *val = new
- swapped = true
- runtime.RaceReleaseMerge(unsafe.Pointer(val))
- }
- runtime.RaceSemrelease(&mtx)
- return
-}
-
-func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool) {
- _ = *val
- swapped = false
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(val))
- runtime.RaceAcquire(unsafe.Pointer(val))
- if *val == old {
- *val = new
- swapped = true
- runtime.RaceReleaseMerge(unsafe.Pointer(val))
- }
- runtime.RaceSemrelease(&mtx)
- return
-}
-
-func AddInt32(val *int32, delta int32) int32 {
- return int32(AddUint32((*uint32)(unsafe.Pointer(val)), uint32(delta)))
-}
-
-func AddUint32(val *uint32, delta uint32) (new uint32) {
- _ = *val
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(val))
- runtime.RaceAcquire(unsafe.Pointer(val))
- *val = *val + delta
- new = *val
- runtime.RaceReleaseMerge(unsafe.Pointer(val))
- runtime.RaceSemrelease(&mtx)
-
- return
-}
-
-func AddInt64(val *int64, delta int64) int64 {
- return int64(AddUint64((*uint64)(unsafe.Pointer(val)), uint64(delta)))
-}
-
-func AddUint64(val *uint64, delta uint64) (new uint64) {
- _ = *val
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(val))
- runtime.RaceAcquire(unsafe.Pointer(val))
- *val = *val + delta
- new = *val
- runtime.RaceReleaseMerge(unsafe.Pointer(val))
- runtime.RaceSemrelease(&mtx)
-
- return
-}
-
-func AddUintptr(val *uintptr, delta uintptr) (new uintptr) {
- _ = *val
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(val))
- runtime.RaceAcquire(unsafe.Pointer(val))
- *val = *val + delta
- new = *val
- runtime.RaceReleaseMerge(unsafe.Pointer(val))
- runtime.RaceSemrelease(&mtx)
-
- return
-}
-
-func LoadInt32(addr *int32) int32 {
- return int32(LoadUint32((*uint32)(unsafe.Pointer(addr))))
-}
-
-func LoadUint32(addr *uint32) (val uint32) {
- _ = *addr
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(addr))
- runtime.RaceAcquire(unsafe.Pointer(addr))
- val = *addr
- runtime.RaceSemrelease(&mtx)
- return
-}
-
-func LoadInt64(addr *int64) int64 {
- return int64(LoadUint64((*uint64)(unsafe.Pointer(addr))))
-}
-
-func LoadUint64(addr *uint64) (val uint64) {
- _ = *addr
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(addr))
- runtime.RaceAcquire(unsafe.Pointer(addr))
- val = *addr
- runtime.RaceSemrelease(&mtx)
- return
-}
-
-func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer) {
- _ = *addr
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(addr))
- runtime.RaceAcquire(unsafe.Pointer(addr))
- val = *addr
- runtime.RaceSemrelease(&mtx)
- return
-}
-
-func LoadUintptr(addr *uintptr) (val uintptr) {
- _ = *addr
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(addr))
- runtime.RaceAcquire(unsafe.Pointer(addr))
- val = *addr
- runtime.RaceSemrelease(&mtx)
- return
-}
-
-func StoreInt32(addr *int32, val int32) {
- StoreUint32((*uint32)(unsafe.Pointer(addr)), uint32(val))
-}
-
-func StoreUint32(addr *uint32, val uint32) {
- _ = *addr
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(addr))
- *addr = val
- runtime.RaceRelease(unsafe.Pointer(addr))
- runtime.RaceSemrelease(&mtx)
-}
-
-func StoreInt64(addr *int64, val int64) {
- StoreUint64((*uint64)(unsafe.Pointer(addr)), uint64(val))
-}
-
-func StoreUint64(addr *uint64, val uint64) {
- _ = *addr
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(addr))
- *addr = val
- runtime.RaceRelease(unsafe.Pointer(addr))
- runtime.RaceSemrelease(&mtx)
-}
-
-func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer) {
- _ = *addr
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(addr))
- *addr = val
- runtime.RaceRelease(unsafe.Pointer(addr))
- runtime.RaceSemrelease(&mtx)
-}
-
-func StoreUintptr(addr *uintptr, val uintptr) {
- _ = *addr
- runtime.RaceSemacquire(&mtx)
- runtime.RaceRead(unsafe.Pointer(addr))
- *addr = val
- runtime.RaceRelease(unsafe.Pointer(addr))
- runtime.RaceSemrelease(&mtx)
-}
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build race
+
+// This file exists only so the compiler accepts this package's body-less
+// function declarations; the operations are implemented in
+// src/pkg/runtime/race_amd64.s.