raceReleaseMerge(unsafe.Pointer(&rw.writerSem))
raceDisable()
}
- if atomic.AddInt32(&rw.readerCount, -1) < 0 {
+ if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
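+ // r+1 is the value readerCount held before this decrement. r+1 == 0
+ // means no lock was held at all; r+1 == -rwmutexMaxReaders means a
+ // writer holds the lock with no active readers. Either way, this
+ // RUnlock has no matching RLock.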
+ if r+1 == 0 || r+1 == -rwmutexMaxReaders {
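+ // The race detector was disabled on entry (raceDisable above), so it
+ // must be re-enabled before panicking out of this function.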
+ raceEnable()
+ panic("sync: RUnlock of unlocked RWMutex")
+ }
// A writer is pending.
if atomic.AddInt32(&rw.readerWait, -1) == 0 {
// The last reader unblocks the writer.
runtime_Semrelease(&rw.writerSem)
}
}

func (rw *RWMutex) Unlock() {
// Announce to readers there is no active writer.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
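+ // Lock subtracts rwmutexMaxReaders from readerCount, so after adding it
+ // back r must stay below rwmutexMaxReaders. r >= rwmutexMaxReaders means
+ // this Unlock has no matching Lock.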
+ if r >= rwmutexMaxReaders {
+ raceEnable()
+ panic("sync: Unlock of unlocked RWMutex")
+ }
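+ // r counts the readers that arrived while the writer held the lock;
+ // each of them is blocked on readerSem and must be released here.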
// Unblock blocked readers, if any.
for i := 0; i < int(r); i++ {
runtime_Semrelease(&rw.readerSem)
}
// Allow other writers to proceed.
rw.w.Unlock()
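
+// Each of the tests below misuses an RWMutex and fails unless the
+// deferred recover observes the panic introduced above.
+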
+func TestUnlockPanic(t *testing.T) {
+ defer func() {
+ if recover() == nil {
+ t.Fatalf("unlock of unlocked RWMutex did not panic")
+ }
+ }()
+ var mu RWMutex
+ mu.Unlock()
+}
+
+func TestUnlockPanic2(t *testing.T) {
+ defer func() {
+ if recover() == nil {
+ t.Fatalf("unlock of unlocked RWMutex did not panic")
+ }
+ }()
+ var mu RWMutex
+ mu.RLock()
+ mu.Unlock()
+}
+
+func TestRUnlockPanic(t *testing.T) {
+ defer func() {
+ if recover() == nil {
+ t.Fatalf("read unlock of unlocked RWMutex did not panic")
+ }
+ }()
+ var mu RWMutex
+ mu.RUnlock()
+}
+
+func TestRUnlockPanic2(t *testing.T) {
+ defer func() {
+ if recover() == nil {
+ t.Fatalf("read unlock of unlocked RWMutex did not panic")
+ }
+ }()
+ var mu RWMutex
+ mu.Lock()
+ mu.RUnlock()
+}
+
func BenchmarkRWMutexUncontended(b *testing.B) {
type PaddedRWMutex struct {
RWMutex