"io/ioutil"
rtrace "runtime/trace"
"strings"
+ "sync"
"testing"
+ "time"
)
// stacks is a fake stack map populated for test.
}
}
+
+// TestDirectSemaphoreHandoff traces a program designed to trigger sync.Mutex
+// starvation mode, in which Unlock hands the semaphore (and the remaining
+// time slice) directly to a waiting goroutine (see issue 36186), and then
+// verifies that the resulting execution trace can be parsed without error.
+// The parsed trace itself is discarded; the test only checks that tracing
+// and parsing both succeed.
+func TestDirectSemaphoreHandoff(t *testing.T) {
+ prog0 := func() {
+ var mu sync.Mutex
+ var wg sync.WaitGroup
+ mu.Lock()
+ // This is modeled after src/sync/mutex_test.go to trigger Mutex
+ // starvation mode, in which the goroutine that calls Unlock hands off
+ // both the semaphore and its remaining time slice. See issue 36186.
+ for i := 0; i < 2; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 100; i++ {
+ mu.Lock()
+ time.Sleep(100 * time.Microsecond)
+ mu.Unlock()
+ }
+ }()
+ }
+ mu.Unlock()
+ wg.Wait()
+ }
+ if err := traceProgram(t, prog0, "TestDirectSemaphoreHandoff"); err != nil {
+ t.Fatalf("failed to trace the program: %v", err)
+ }
+ // We only need parsing to succeed; the events themselves are not inspected.
+ _, err := parseTrace()
+ if err != nil {
+ t.Fatalf("failed to parse the trace: %v", err)
+ }
+}
}
// goyield is like Gosched, but it:
-// - does not emit a GoSched trace event
+// - emits a GoPreempt trace event instead of a GoSched trace event
// - puts the current G on the runq of the current P instead of the globrunq
func goyield() {
// NOTE(review): only the checkTimeouts call is visible in this chunk; the
// actual yield to the scheduler described in the doc comment above
// presumably happens via an mcall into goyield_m that this view elides —
// confirm against the full file before relying on this body.
checkTimeouts()
}
func goyield_m(gp *g) {
+ if trace.enabled {
+ traceGoPreempt()
+ }
pp := gp.m.p.ptr()
casgstatus(gp, _Grunning, _Grunnable)
dropg()
// the waiter G immediately.
// Note that waiter inherits our time slice: this is desirable
// to avoid having a highly contended semaphore hog the P
- // indefinitely. goyield is like Gosched, but it does not emit a
- // GoSched trace event and, more importantly, puts the current G
- // on the local runq instead of the global one.
+ // indefinitely. goyield is like Gosched, but it emits a
+ // "preempted" trace event instead and, more importantly, puts
+ // the current G on the local runq instead of the global one.
// We only do this in the starving regime (handoff=true), as in
// the non-starving case it is possible for a different waiter
// to acquire the semaphore while we are yielding/scheduling,