From 7ebb187e8e5e588d8c594213ff5187917c4abb20 Mon Sep 17 00:00:00 2001
From: Dmitriy Vyukov
Date: Thu, 27 Jun 2013 21:03:35 +0400
Subject: [PATCH] undo CL 9776044 / 1e280889f997
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Failure on bot:
http://build.golang.org/log/f4c648906e1289ec2237c1d0880fb1a8b1852a08

««« original CL description
runtime: fix CPU underutilization

runtime.newproc/ready are deliberately sloppy about waking new M's;
they only ensure that there is at least 1 spinning M.
Currently, to compensate for that, schedule() checks whether the current P
has local work while there are no spinning M's, and if so wakes up another one.
This does not work if goroutines do not call schedule().
With this change, a spinning M wakes up another M when it finds work to do.
It's also not ideal, but it fixes the underutilization.
A proper check would require knowing the exact number of runnable G's,
but that is too expensive to maintain.

Fixes #5586.

R=rsc
TBR=rsc
CC=gobot, golang-dev
https://golang.org/cl/9776044
»»»

R=golang-dev
CC=golang-dev
https://golang.org/cl/10692043
---
 src/pkg/runtime/proc.c       | 41 ++++++++++++------------------
 src/pkg/runtime/proc_test.go | 24 ---------------------
 2 files changed, 14 insertions(+), 51 deletions(-)

diff --git a/src/pkg/runtime/proc.c b/src/pkg/runtime/proc.c
index 6dcf564cb0..e6844032a6 100644
--- a/src/pkg/runtime/proc.c
+++ b/src/pkg/runtime/proc.c
@@ -1018,7 +1018,7 @@ execute(G *gp)
 // Finds a runnable goroutine to execute.
 // Tries to steal from other P's, get g from global queue, poll network.
 static G*
-findrunnable1(void)
+findrunnable(void)
 {
 	G *gp;
 	P *p;
@@ -1127,29 +1127,6 @@ stop:
 	goto top;
 }
 
-static G*
-findrunnable(void)
-{
-	G *gp;
-	int32 nmspinning;
-
-	gp = findrunnable1();  // blocks until work is available
-	if(m->spinning) {
-		m->spinning = false;
-		nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1);
-		if(nmspinning < 0)
-			runtime·throw("findrunnable: negative nmspinning");
-	} else
-		nmspinning = runtime·atomicload(&runtime·sched.nmspinning);
-
-	// M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
-	// so see if we need to wakeup another P here.
-	if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0)
-		wakep();
-
-	return gp;
-}
-
 // Injects the list of runnable G's into the scheduler.
 // Can run concurrently with GC.
 static void
@@ -1208,11 +1185,21 @@ top:
 			runtime·throw("schedule: spinning with local work");
 	}
 	if(gp == nil)
-		gp = findrunnable();  // blocks until work is available
+		gp = findrunnable();
+
+	if(m->spinning) {
+		m->spinning = false;
+		runtime·xadd(&runtime·sched.nmspinning, -1);
+	}
+
+	// M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
+	// so see if we need to wakeup another M here.
+	if (m->p->runqhead != m->p->runqtail &&
+		runtime·atomicload(&runtime·sched.nmspinning) == 0 &&
+		runtime·atomicload(&runtime·sched.npidle) > 0)  // TODO: fast atomic
+		wakep();
 
 	if(gp->lockedm) {
-		// Hands off own p to the locked m,
-		// then blocks waiting for a new p.
 		startlockedm(gp);
 		goto top;
 	}
diff --git a/src/pkg/runtime/proc_test.go b/src/pkg/runtime/proc_test.go
index c72d54edbe..83368e0c33 100644
--- a/src/pkg/runtime/proc_test.go
+++ b/src/pkg/runtime/proc_test.go
@@ -93,30 +93,6 @@ func TestYieldLocked(t *testing.T) {
 	<-c
 }
 
-func TestGoroutineParallelism(t *testing.T) {
-	const P = 4
-	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
-	for try := 0; try < 10; try++ {
-		done := make(chan bool)
-		x := uint32(0)
-		for p := 0; p < P; p++ {
-			// Test that all P goroutines are scheduled at the same time
-			go func(p int) {
-				for i := 0; i < 3; i++ {
-					expected := uint32(P*i + p)
-					for atomic.LoadUint32(&x) != expected {
-					}
-					atomic.StoreUint32(&x, expected+1)
-				}
-				done <- true
-			}(p)
-		}
-		for p := 0; p < P; p++ {
-			<-done
-		}
-	}
-}
-
 func TestBlockLocked(t *testing.T) {
 	const N = 10
 	c := make(chan bool)
-- 
2.48.1
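
For context on what the reverted change and its deleted test were exercising, the body of
TestGoroutineParallelism above can be run as a standalone program. The sketch below is a rough
adaptation, not part of this patch: the main() wrapper and the final print are additions, and
since the goroutines busy-wait, it assumes either at least P CPUs or a runtime with preemptible
loops. On a scheduler with the issue #5586 underutilization bug the goroutines livelock waiting
for each other; otherwise the program finishes quickly.

	package main

	import (
		"fmt"
		"runtime"
		"sync/atomic"
	)

	func main() {
		const P = 4
		runtime.GOMAXPROCS(P)
		for try := 0; try < 10; try++ {
			done := make(chan bool)
			var x uint32 // shared turn counter, reset on each attempt
			for p := 0; p < P; p++ {
				go func(p int) {
					// Each goroutine busy-waits for its turn; the group can
					// only make progress if all P goroutines are scheduled
					// at the same time.
					for i := 0; i < 3; i++ {
						expected := uint32(P*i + p)
						for atomic.LoadUint32(&x) != expected {
						}
						atomic.StoreUint32(&x, expected+1)
					}
					done <- true
				}(p)
			}
			for p := 0; p < P; p++ {
				<-done
			}
		}
		fmt.Println("all goroutines were scheduled in parallel")
	}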