"errors": {},
"io": {"errors", "sync"},
"runtime": {"unsafe"},
- "sync": {"sync/atomic"},
+ "sync": {"sync/atomic", "unsafe"},
"sync/atomic": {"unsafe"},
"unsafe": {},
// c.L.Unlock()
//
func (c *Cond) Wait() {
+ if raceenabled {
+ raceDisable()
+ }
c.m.Lock()
if c.newSema == nil {
c.newSema = new(uint32)
}
s := c.newSema
c.newWaiters++
c.m.Unlock()
+ if raceenabled {
+ raceEnable()
+ }
c.L.Unlock()
runtime_Semacquire(s)
c.L.Lock()
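// A minimal usage sketch, not in the original cond.go: the wait-loop pattern
// that the Wait documentation above prescribes, applied to a hypothetical
// queue chosen only for illustration.
//
//	c := sync.NewCond(new(sync.Mutex))
//	queue := []int{}
//
//	// Consumer: hold c.L around the condition check and the Wait call.
//	c.L.Lock()
//	for len(queue) == 0 {
//		c.Wait() // releases c.L while blocked, reacquires it before returning
//	}
//	item := queue[0]
//	queue = queue[1:]
//	c.L.Unlock()
//	_ = item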
// It is allowed but not required for the caller to hold c.L
// during the call.
func (c *Cond) Signal() {
+ if raceenabled {
+ raceDisable()
+ }
c.m.Lock()
if c.oldWaiters == 0 && c.newWaiters > 0 {
// Retire old generation; rename new to old.
runtime_Semrelease(c.oldSema)
}
c.m.Unlock()
+ if raceenabled {
+ raceEnable()
+ }
}
// Broadcast wakes all goroutines waiting on c.
// It is allowed but not required for the caller to hold c.L
// during the call.
func (c *Cond) Broadcast() {
+ if raceenabled {
+ raceDisable()
+ }
c.m.Lock()
// Wake both generations.
if c.oldWaiters > 0 {
c.newSema = nil
}
c.m.Unlock()
+ if raceenabled {
+ raceEnable()
+ }
}
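// A minimal sketch, not in the original cond.go: the producer side that pairs
// with the consumer sketch above. Signal wakes a single waiter; Broadcast
// wakes them all, for the case where every waiter can make progress after
// the update.
//
//	// Producer: mutate the condition under c.L, then notify.
//	c.L.Lock()
//	queue = append(queue, 42)
//	c.L.Unlock()
//	c.Signal() // or c.Broadcast() if all waiters should recheck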
// Values containing the types defined in this package should not be copied.
package sync
-import "sync/atomic"
+import (
+ "sync/atomic"
+ "unsafe"
+)
// A Mutex is a mutual exclusion lock.
// Mutexes can be created as part of other structures;
func (m *Mutex) Lock() {
// Fast path: grab unlocked mutex.
if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
+ if raceenabled {
+ raceAcquire(unsafe.Pointer(m))
+ }
return
}
awoke = true
}
}
+
+ if raceenabled {
+ raceAcquire(unsafe.Pointer(m))
+ }
}
// Unlock unlocks m.
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
+ if raceenabled {
+ raceRelease(unsafe.Pointer(m))
+ }
+
// Fast path: drop lock bit.
new := atomic.AddInt32(&m.state, -mutexLocked)
if (new+mutexLocked)&mutexLocked == 0 {
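// A minimal sketch, not in the original mutex.go, of what the annotations in
// Lock and Unlock establish: raceRelease on Unlock and raceAcquire on Lock
// tell the detector that the two critical sections are ordered, so the shared
// counter below is not reported as a race.
//
//	var mu sync.Mutex
//	counter := 0
//	done := make(chan bool)
//
//	go func() {
//		mu.Lock()
//		counter++ // write published by Unlock's release annotation
//		mu.Unlock()
//		done <- true
//	}()
//
//	mu.Lock()
//	_ = counter // read ordered via Lock's acquire annotation
//	mu.Unlock()
//	<-done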
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build race
+
+package sync
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+const raceenabled = true
+
+func raceAcquire(addr unsafe.Pointer) {
+ runtime.RaceAcquire(addr)
+}
+
+func raceRelease(addr unsafe.Pointer) {
+ runtime.RaceRelease(addr)
+}
+
+func raceReleaseMerge(addr unsafe.Pointer) {
+ runtime.RaceReleaseMerge(addr)
+}
+
+func raceDisable() {
+ runtime.RaceDisable()
+}
+
+func raceEnable() {
+ runtime.RaceEnable()
+}
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !race
+
+package sync
+
+import (
+ "unsafe"
+)
+
+const raceenabled = false
+
+func raceAcquire(addr unsafe.Pointer) {
+}
+
+func raceRelease(addr unsafe.Pointer) {
+}
+
+func raceReleaseMerge(addr unsafe.Pointer) {
+}
+
+func raceDisable() {
+}
+
+func raceEnable() {
+}
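// Not part of the original files: a note on how the pair above is selected.
// Building with the race detector sets the "race" build tag, so race.go is
// compiled and raceenabled is true; ordinary builds use race0.go, where
// raceenabled is a constant false and the empty bodies let the compiler drop
// every "if raceenabled" branch.
//
//	go test -race sync   // race.go: annotations call into the runtime
//	go test sync         // race0.go: annotations compile away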
package sync
-import "sync/atomic"
+import (
+ "sync/atomic"
+ "unsafe"
+)
// An RWMutex is a reader/writer mutual exclusion lock.
// The lock can be held by an arbitrary number of readers
// RLock locks rw for reading.
func (rw *RWMutex) RLock() {
+ if raceenabled {
+ raceDisable()
+ }
if atomic.AddInt32(&rw.readerCount, 1) < 0 {
// A writer is pending, wait for it.
runtime_Semacquire(&rw.readerSem)
}
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(&rw.readerSem))
+ }
}
// RUnlock undoes a single RLock call;
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&rw.writerSem))
+ raceDisable()
+ }
if atomic.AddInt32(&rw.readerCount, -1) < 0 {
// A writer is pending.
if atomic.AddInt32(&rw.readerWait, -1) == 0 {
runtime_Semrelease(&rw.writerSem)
}
}
+ if raceenabled {
+ raceEnable()
+ }
}
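// A minimal sketch, not in the original rwmutex.go, of the read side these
// annotations cover: any number of readers may hold the lock concurrently,
// and the raceAcquire on readerSem in RLock orders each reader after the
// last writer's release, so the map read below is race-free.
//
//	var rw sync.RWMutex
//	data := map[string]int{}
//
//	rw.RLock()
//	_ = data["key"] // concurrent with other readers, ordered after writers
//	rw.RUnlock()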
// Lock locks rw for writing.
// a blocked Lock call excludes new readers from acquiring
// the lock.
func (rw *RWMutex) Lock() {
+ if raceenabled {
+ raceDisable()
+ }
// First, resolve competition with other writers.
rw.w.Lock()
// Announce to readers there is a pending writer.
r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
// Wait for active readers.
if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
runtime_Semacquire(&rw.writerSem)
}
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(&rw.readerSem))
+ raceAcquire(unsafe.Pointer(&rw.writerSem))
+ }
}
// Unlock unlocks rw for writing. It is a run-time error if rw is
// goroutine. One goroutine may RLock (Lock) an RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
+ if raceenabled {
+ raceRelease(unsafe.Pointer(&rw.readerSem))
+ raceRelease(unsafe.Pointer(&rw.writerSem))
+ raceDisable()
+ }
+
// Announce to readers there is no active writer.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
// Unblock blocked readers, if any.
for i := 0; i < int(r); i++ {
runtime_Semrelease(&rw.readerSem)
}
// Allow other writers to proceed.
rw.w.Unlock()
+ if raceenabled {
+ raceEnable()
+ }
}
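// A minimal sketch, not in the original rwmutex.go, of the write side that
// pairs with the reader sketch above: Lock excludes readers and writers
// alike, and the raceRelease calls in Unlock publish the write to whichever
// RLock or Lock acquires the lock next.
//
//	rw.Lock()
//	data["key"] = 1 // exclusive access
//	rw.Unlock()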
// RLocker returns a Locker interface that implements
package sync
-import "sync/atomic"
+import (
+ "sync/atomic"
+ "unsafe"
+)
// A WaitGroup waits for a collection of goroutines to finish.
// The main goroutine calls Add to set the number of
// If the counter becomes zero, all goroutines blocked on Wait() are released.
// If the counter goes negative, Add panics.
func (wg *WaitGroup) Add(delta int) {
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(wg))
+ raceDisable()
+ defer raceEnable()
+ }
v := atomic.AddInt32(&wg.counter, int32(delta))
if v < 0 {
panic("sync: negative WaitGroup counter")
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
+ if raceenabled {
+ raceDisable()
+ }
if atomic.LoadInt32(&wg.counter) == 0 {
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(wg))
+ }
return
}
wg.m.Lock()
// to avoid missing an Add.
if atomic.LoadInt32(&wg.counter) == 0 {
atomic.AddInt32(&wg.waiters, -1)
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(wg))
+ raceDisable()
+ }
wg.m.Unlock()
+ if raceenabled {
+ raceEnable()
+ }
return
}
if wg.sema == nil {
wg.sema = new(uint32)
}
s := wg.sema
wg.m.Unlock()
runtime_Semacquire(s)
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(wg))
+ }
}
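// A minimal sketch, not in the original waitgroup.go, of the Add/Done/Wait
// pattern these annotations cover: raceReleaseMerge in Add (and so in Done,
// which calls Add(-1)) paired with raceAcquire after Wait means the workers'
// writes below are visible, and race-free, once Wait returns.
//
//	var wg sync.WaitGroup
//	results := make([]int, 3)
//	for i := 0; i < 3; i++ {
//		wg.Add(1)
//		go func(i int) {
//			defer wg.Done()
//			results[i] = i * i
//		}(i)
//	}
//	wg.Wait()
//	_ = results // ordered after every worker's write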