t.arg = gp
tb := t.assignBucket()
lock(&tb.lock)
- tb.addtimerLocked(t)
+ if !tb.addtimerLocked(t) {
+ unlock(&tb.lock)
+ badTimer()
+ }
goparkunlock(&tb.lock, waitReasonSleep, traceEvGoSleep, 2)
}
func addtimer(t *timer) {
tb := t.assignBucket()
lock(&tb.lock)
- tb.addtimerLocked(t)
+ ok := tb.addtimerLocked(t)
unlock(&tb.lock)
+ if !ok {
+ badTimer()
+ }
}
// Add a timer to the heap and start or kick timerproc if the new timer is
// earlier than any of the others.
// Timers are locked.
-func (tb *timersBucket) addtimerLocked(t *timer) {
+// Returns whether all is well: false if the data structure is corrupt
+// due to user-level races.
+func (tb *timersBucket) addtimerLocked(t *timer) bool {
// when must never be negative; otherwise timerproc will overflow
// during its delta calculation and never expire other runtime timers.
	if t.when < 0 {
		t.when = 1<<63 - 1
	}
t.i = len(tb.t)
tb.t = append(tb.t, t)
- siftupTimer(tb.t, t.i)
+ if !siftupTimer(tb.t, t.i) {
+ return false
+ }
if t.i == 0 {
// siftup moved to top: new earliest deadline.
		if tb.sleeping {
			tb.sleeping = false
			notewakeup(&tb.waitnote)
		}
		if !tb.created {
			tb.created = true
			go timerproc(tb)
		}
	}
+	return true
}
// Delete timer t from the heap.
}
tb.t[last] = nil
tb.t = tb.t[:last]
+ ok := true
if i != last {
- siftupTimer(tb.t, i)
- siftdownTimer(tb.t, i)
+ if !siftupTimer(tb.t, i) {
+ ok = false
+ }
+ if !siftdownTimer(tb.t, i) {
+ ok = false
+ }
}
unlock(&tb.lock)
+ if !ok {
+ badTimer()
+ }
return true
}
if delta > 0 {
break
}
+ ok := true
if t.period > 0 {
// leave in heap but adjust next time to fire
t.when += t.period * (1 + -delta/t.period)
- siftdownTimer(tb.t, 0)
+ if !siftdownTimer(tb.t, 0) {
+ ok = false
+ }
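		// Illustration (not part of the original source): with t.period = 30ns
		// and the timer 100ns overdue (delta = -100ns), the adjustment above
		// computes t.when += 30 * (1 + 100/30) = 30 * 4 = 120ns, so the next
		// firing time lands 20ns in the future while keeping the timer's
		// period-aligned phase.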
} else {
// remove from heap
last := len(tb.t) - 1
tb.t[last] = nil
tb.t = tb.t[:last]
if last > 0 {
- siftdownTimer(tb.t, 0)
+ if !siftdownTimer(tb.t, 0) {
+ ok = false
+ }
}
t.i = -1 // mark as removed
}
arg := t.arg
seq := t.seq
unlock(&tb.lock)
+ if !ok {
+ badTimer()
+ }
if raceenabled {
raceacquire(unsafe.Pointer(t))
}
}
// Heap maintenance algorithms.
-
-func siftupTimer(t []*timer, i int) {
+// These algorithms check for slice index errors manually.
+// Slice index error can happen if the program is using racy
+// access to timers. We don't want to panic here, because
+// it will cause the program to crash with a mysterious
+// "panic holding locks" message. Instead, we panic while not
+// holding a lock.
+// The races can occur despite the bucket locks because assignBucket
+// itself is called without locks, so racy calls can cause a timer to
+// change buckets while executing these functions.
+
+func siftupTimer(t []*timer, i int) bool {
+ if i >= len(t) {
+ return false
+ }
when := t[i].when
tmp := t[i]
for i > 0 {
t[i] = tmp
t[i].i = i
}
+ return true
}
-func siftdownTimer(t []*timer, i int) {
+func siftdownTimer(t []*timer, i int) bool {
n := len(t)
+ if i >= n {
+ return false
+ }
when := t[i].when
tmp := t[i]
for {
t[i] = tmp
t[i].i = i
}
+ return true
+}
+
+// badTimer is called if the timer data structures have been corrupted,
+// presumably due to racy use by the program. We panic here rather than
+// panicking due to invalid slice access while holding locks.
+// See issue #25686.
+func badTimer() {
+ panic(errorString("racy use of timers"))
}
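
// Illustrative sketch (not part of this change): the user-visible effect of
// the fix. A program that shares one timer across goroutines without any
// synchronization, like the one below, could previously die with the confusing
// "fatal error: panic holding locks"; with badTimer in place it may instead
// panic with "racy use of timers", or, timing permitting, keep running.
// Only public time package APIs are used.
//
//	package main
//
//	import "time"
//
//	func main() {
//		tm := time.NewTimer(time.Hour)
//		for i := 0; i < 4; i++ {
//			go func() {
//				for {
//					tm.Reset(time.Hour) // unsynchronized concurrent Reset
//				}
//			}()
//		}
//		select {} // block forever while the racing goroutines run
//	}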
// Entry points for net, time to call nanotime.
"encoding/gob"
"encoding/json"
"fmt"
+ "internal/race"
"math/big"
"math/rand"
"os"
"runtime"
"strings"
+ "sync"
"testing"
"testing/quick"
. "time"
t.Errorf("readFile(%q) error = %v; want error containing 'is too large'", zero, err)
}
}
+
+// Issue 25686: hard crash on concurrent timer access.
+// This test deliberately invokes a race condition.
+// We are testing that we don't crash with "fatal error: panic holding locks".
+func TestConcurrentTimerReset(t *testing.T) {
+ if race.Enabled {
+ t.Skip("skipping test under race detector")
+ }
+
+ // We expect this code to panic rather than crash.
+ // Don't worry if it doesn't panic.
+ catch := func(i int) {
+ if e := recover(); e != nil {
+ t.Logf("panic in goroutine %d, as expected, with %q", i, e)
+ } else {
+ t.Logf("no panic in goroutine %d", i)
+ }
+ }
+
+ const goroutines = 8
+ const tries = 1000
+ var wg sync.WaitGroup
+ wg.Add(goroutines)
+ timer := NewTimer(Hour)
+ for i := 0; i < goroutines; i++ {
+ go func(i int) {
+ defer wg.Done()
+ defer catch(i)
+ for j := 0; j < tries; j++ {
+ timer.Reset(Hour + Duration(i*j))
+ }
+ }(i)
+ }
+ wg.Wait()
+}
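
// For contrast, a sketch (not part of this change) of race-free use of the
// same API: serialize access to a shared timer so Reset is never called
// concurrently. safeReset is a hypothetical helper and assumes the ordinary
// time and sync imports rather than this file's dot-import of time.
//
//	// safeReset resets a shared timer under a mutex, so concurrent callers
//	// never race on the timer's internal heap state.
//	func safeReset(mu *sync.Mutex, tm *time.Timer, d time.Duration) {
//		mu.Lock()
//		defer mu.Unlock()
//		tm.Reset(d)
//	}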