Cypherpunks repositories - gostls13.git/commitdiff
runtime: ensure fair scheduling during frequent GCs
author	Dmitriy Vyukov <dvyukov@google.com>
	Tue, 21 Jan 2014 06:24:42 +0000 (10:24 +0400)
committer	Dmitriy Vyukov <dvyukov@google.com>
	Tue, 21 Jan 2014 06:24:42 +0000 (10:24 +0400)
What was happening is as follows:
Each writer goroutine always triggers GC during its scheduling quantum.
After GC, goroutines are shuffled so that the timer goroutine is always second in the queue.
This repeats indefinitely, causing timer goroutine starvation.
Fixes #7126.

R=golang-codereviews, shanemhansen, khr, khr
CC=golang-codereviews
https://golang.org/cl/53080043

src/pkg/runtime/proc.c
src/pkg/runtime/proc_test.go

index 29b6a7c763984f1a93443efadf5c8d16509c1ee9..693cacaa5885169385136c90f431fb5b28974783 100644 (file)
@@ -2207,6 +2207,7 @@ static void
 procresize(int32 new)
 {
        int32 i, old;
+       bool empty;
        G *gp;
        P *p;
 
@@ -2231,11 +2232,27 @@ procresize(int32 new)
        }
 
        // redistribute runnable G's evenly
-       // collect all runnable goroutines in global queue
-       for(i = 0; i < old; i++) {
-               p = runtime·allp[i];
-               while(gp = runqget(p))
-                       globrunqput(gp);
+       // collect all runnable goroutines in global queue preserving FIFO order
+       // FIFO order is required to ensure fairness even during frequent GCs
+       // see http://golang.org/issue/7126
+       empty = false;
+       while(!empty) {
+               empty = true;
+               for(i = 0; i < old; i++) {
+                       p = runtime·allp[i];
+                       if(p->runqhead == p->runqtail)
+                               continue;
+                       empty = false;
+                       // pop from tail of local queue
+                       p->runqtail--;
+                       gp = p->runq[p->runqtail%nelem(p->runq)];
+                       // push onto head of global queue
+                       gp->schedlink = runtime·sched.runqhead;
+                       runtime·sched.runqhead = gp;
+                       if(runtime·sched.runqtail == nil)
+                               runtime·sched.runqtail = gp;
+                       runtime·sched.runqsize++;
+               }
        }
        // fill local queues with at most nelem(p->runq)/2 goroutines
        // start at 1 because current M already executes some G and will acquire allp[0] below,
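The new loop above drains the per-P local run queues in round-robin passes, popping from the tail of each local queue and pushing onto the head of the global queue, so the oldest goroutines end up at the front of the global queue. Below is a minimal Go model of that ordering, not the runtime code itself: it uses string slices and a hypothetical drainFIFO helper in place of the G lists that procresize manipulates under the scheduler lock.

package main

import "fmt"

// drainFIFO is a simplified stand-in for the drain loop in procresize:
// each pass takes one goroutine from the tail of every non-empty local
// run queue and pushes it onto the head of the global queue.
func drainFIFO(local [][]string) []string {
	var global []string // global[0] is the head of the global queue
	for {
		empty := true
		for i := range local {
			n := len(local[i])
			if n == 0 {
				continue
			}
			empty = false
			gp := local[i][n-1] // pop from tail of local queue
			local[i] = local[i][:n-1]
			global = append([]string{gp}, global...) // push onto head of global queue
		}
		if empty {
			return global
		}
	}
}

func main() {
	// Two Ps with goroutines in local FIFO order (index 0 is the oldest).
	local := [][]string{{"g1", "g2", "g3"}, {"g4", "g5"}}
	fmt.Println(drainFIFO(local)) // [g1 g4 g2 g5 g3]
}

Each local queue's own order is preserved (g1 before g2 before g3) and the queues are interleaved, which is what keeps a goroutine such as the timer goroutine from being pushed behind the writers after every GC.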
index dd70ed97d7633323f1dcc23ae2f53758dc455ea3..d3f1f8bb1c865c609776673e2aaa3745bbbac9de 100644 (file)
@@ -244,6 +244,49 @@ func TestPreemptionGC(t *testing.T) {
        atomic.StoreUint32(&stop, 1)
 }
 
+func TestGCFairness(t *testing.T) {
+       output := executeTest(t, testGCFairnessSource, nil)
+       want := "OK\n"
+       if output != want {
+               t.Fatalf("want %s, got %s\n", want, output)
+       }
+}
+
+const testGCFairnessSource = `
+package main
+
+import (
+       "fmt"
+       "os"
+       "runtime"
+       "time"
+)
+
+func main() {
+       runtime.GOMAXPROCS(1)
+       f, err := os.Open("/dev/null")
+       if os.IsNotExist(err) {
+               // This test tests what it is intended to test only if writes are fast.
+               // If there is no /dev/null, we just don't execute the test.
+               fmt.Println("OK\n")
+               return
+       }
+       if err != nil {
+               fmt.Println(err)
+               os.Exit(1)
+       }
+       for i := 0; i < 2; i++ {
+               go func() {
+                       for {
+                               f.Write([]byte("."))
+                       }
+               }()
+       }
+       time.Sleep(10 * time.Millisecond)
+       fmt.Println("OK")
+}
+`
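The embedded program is built and run as a separate process by the executeTest helper already used elsewhere in the runtime tests, and its output is compared against "OK\n". Assuming a normal checkout, the new test can be exercised on its own with something like:

go test -run TestGCFairness runtime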
+
 func stackGrowthRecursive(i int) {
        var pad [128]uint64
        if i != 0 && pad[0] == 0 {