// and the corresponding call to RUnlock “synchronizes before”
// the n+1'th call to Lock.
type RWMutex struct {
-	w           Mutex  // held if there are pending writers
-	writerSem   uint32 // semaphore for writers to wait for completing readers
-	readerSem   uint32 // semaphore for readers to wait for completing writers
-	readerCount int32  // number of pending readers
-	readerWait  int32  // number of departing readers
+	w           Mutex        // held if there are pending writers
+	writerSem   uint32       // semaphore for writers to wait for completing readers
+	readerSem   uint32       // semaphore for readers to wait for completing writers
+	readerCount atomic.Int32 // number of pending readers
+	readerWait  atomic.Int32 // number of departing readers
}
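// Illustrative sketch (not part of the patch): the “synchronizes before”
// guarantee quoted above means that once a reader calls RUnlock, the next
// Lock observes every memory effect of that read section. shared and done
// are hypothetical names:
//
//	var shared int
//	var mu sync.RWMutex
//	done := make(chan struct{})
//	go func() {
//		mu.RLock()
//		_ = shared   // read under the lock
//		mu.RUnlock() // synchronizes before the writer's next Lock
//		close(done)
//	}()
//	mu.Lock() // sees the effects of completed read sections
//	shared = 42
//	mu.Unlock()
//	<-done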
const rwmutexMaxReaders = 1 << 30
_ = rw.w.state
race.Disable()
}
- if atomic.AddInt32(&rw.readerCount, 1) < 0 {
+ if rw.readerCount.Add(1) < 0 {
// A writer is pending, wait for it.
runtime_SemacquireMutex(&rw.readerSem, false, 0)
}
race.Disable()
}
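// Worked sketch of the readerCount encoding (illustrative, assuming the
// sync/atomic and fmt imports): a pending writer subtracts rwmutexMaxReaders
// (1<<30), driving the count negative, so a reader whose Add(1) comes back
// negative knows a writer is ahead of it:
//
//	var readerCount atomic.Int32
//	readerCount.Store(2)                // two active readers
//	readerCount.Add(-1 << 30)           // a writer announces itself
//	fmt.Println(readerCount.Add(1) < 0) // true: the new reader must wait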
for {
- c := atomic.LoadInt32(&rw.readerCount)
+ c := rw.readerCount.Load()
if c < 0 {
if race.Enabled {
race.Enable()
}
return false
}
- if atomic.CompareAndSwapInt32(&rw.readerCount, c, c+1) {
+ if rw.readerCount.CompareAndSwap(c, c+1) {
if race.Enabled {
race.Enable()
race.Acquire(unsafe.Pointer(&rw.readerSem))
race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
race.Disable()
}
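// Hedged usage sketch for the TryRLock loop above: a caller can skip work
// instead of blocking while a writer is active or pending. The function and
// map are hypothetical:
//
//	func readIfIdle(mu *sync.RWMutex, m map[string]int) (int, bool) {
//		if !mu.TryRLock() { // fails while a writer holds or awaits the lock
//			return 0, false
//		}
//		defer mu.RUnlock()
//		return m["key"], true
//	}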
- if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
+ if r := rw.readerCount.Add(-1); r < 0 {
// Outlined slow-path to allow the fast-path to be inlined
rw.rUnlockSlow(r)
}
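// The comment above names the idiom: keep the fast path small enough to
// inline and move the rare branch into its own function. A minimal sketch of
// the same pattern, with illustrative names:
//
//	func (c *refCount) dec() {
//		if v := c.n.Add(-1); v < 0 { // fast path, inlinable at call sites
//			c.decSlow(v) // outlined slow path, rarely taken
//		}
//	}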
fatal("sync: RUnlock of unlocked RWMutex")
}
// A writer is pending.
- if atomic.AddInt32(&rw.readerWait, -1) == 0 {
+ if rw.readerWait.Add(-1) == 0 {
// The last reader unblocks the writer.
runtime_Semrelease(&rw.writerSem, false, 1)
}
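// Illustrative analogy (not from the patch): readerWait is a countdown, and
// the reader that takes it to zero wakes the writer, much as the last Done
// on a WaitGroup releases Wait. read and n are hypothetical:
//
//	var readers sync.WaitGroup
//	readers.Add(n)
//	for i := 0; i < n; i++ {
//		go func() { defer readers.Done(); read() }()
//	}
//	readers.Wait() // the writer proceeds once the last reader departs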
// First, resolve competition with other writers.
rw.w.Lock()
// Announce to readers there is a pending writer.
- r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
+ r := rw.readerCount.Add(-rwmutexMaxReaders) + rwmutexMaxReaders
// Wait for active readers.
- if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
+ if r != 0 && rw.readerWait.Add(r) != 0 {
runtime_SemacquireMutex(&rw.writerSem, false, 0)
}
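// Worked example of the announcement arithmetic above (values illustrative):
// with 3 active readers, readerCount.Add(-rwmutexMaxReaders) returns
// 3 - 1<<30, so r = (3 - 1<<30) + 1<<30 = 3 readers to wait for, while the
// now-negative readerCount diverts new readers onto readerSem.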
if race.Enabled {
race.Enable()
}
return false
}
- if !atomic.CompareAndSwapInt32(&rw.readerCount, 0, -rwmutexMaxReaders) {
+ if !rw.readerCount.CompareAndSwap(0, -rwmutexMaxReaders) {
rw.w.Unlock()
if race.Enabled {
race.Enable()
}
// Announce to readers there is no active writer.
- r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
+ r := rw.readerCount.Add(rwmutexMaxReaders)
if r >= rwmutexMaxReaders {
race.Enable()
fatal("sync: Unlock of unlocked RWMutex")
// errorcheck -0 -m

-// +build !nacl,!386,!wasm,!arm,!gcflags_noopt
+//go:build !nacl && !386 && !wasm && !arm && !gcflags_noopt

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
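// Aside (a workflow note, not part of the test): the diagnostics the ERROR
// comments match can also be inspected directly via the compiler's -m flag,
// for example:
//
//	go build -gcflags='-m' ./...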
var once *sync.Once
func small7() { // ERROR "can inline small7"
// the Do fast path should be inlined
once.Do(small5) // ERROR "inlining call to sync\.\(\*Once\)\.Do"
}
var rwmutex *sync.RWMutex
func small8() { // ERROR "can inline small8"
// the RUnlock fast path should be inlined
- rwmutex.RUnlock() // ERROR "inlining call to sync\.\(\*RWMutex\)\.RUnlock"
+ rwmutex.RUnlock() // ERROR "inlining call to sync\.\(\*RWMutex\)\.RUnlock" "inlining call to atomic\.\(\*Int32\)\.Add"
}
func small9() { // ERROR "can inline small9"
// the RLock fast path should be inlined
- rwmutex.RLock() // ERROR "inlining call to sync\.\(\*RWMutex\)\.RLock"
+ rwmutex.RLock() // ERROR "inlining call to sync\.\(\*RWMutex\)\.RLock" "inlining call to atomic\.\(\*Int32\)\.Add"
}