procresize(int32 new)
{
int32 i, old;
+ bool empty;
G *gp;
P *p;
// redistribute runnable G's evenly
- // collect all runnable goroutines in global queue
- for(i = 0; i < old; i++) {
- p = runtime·allp[i];
- while(gp = runqget(p))
- globrunqput(gp);
+ // collect all runnable goroutines in global queue preserving FIFO order
+ // FIFO order is required to ensure fairness even during frequent GCs
+ // see http://golang.org/issue/7126
+ empty = false;
+ while(!empty) {
+ empty = true;
+ for(i = 0; i < old; i++) {
+ p = runtime·allp[i];
+ if(p->runqhead == p->runqtail)
+ continue;
+ empty = false;
+ // pop from tail of local queue
+ p->runqtail--;
+ gp = p->runq[p->runqtail%nelem(p->runq)];
+ // push onto head of global queue
+ gp->schedlink = runtime·sched.runqhead;
+ runtime·sched.runqhead = gp;
+ if(runtime·sched.runqtail == nil)
+ runtime·sched.runqtail = gp;
+ runtime·sched.runqsize++;
+ }
}
// fill local queues with at most nelem(p->runq)/2 goroutines
// start at 1 because current M already executes some G and will acquire allp[0] below,
// so if we have a spare G we want to put it into allp[1].
}
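
The drain loop above is worth tracing end to end. Below is a minimal,
self-contained Go model of the same technique, not the runtime's actual
types: each P holds a fixed-size ring buffer indexed by monotonically
increasing head/tail counters, the global queue is an intrusive
singly-linked list threaded through schedlink, and each round pops one g
from the tail of every non-empty local ring and pushes it onto the head of
the global list. Names such as drain, schedt, and ringSize are illustrative.

package main

import "fmt"

const ringSize = 4

// g and p are stripped-down stand-ins for the runtime's G and P.
type g struct {
	id        int
	schedlink *g // intrusive link used by the global queue
}

type p struct {
	runqhead, runqtail uint32 // ring indices; head == tail means empty
	runq               [ringSize]*g
}

type schedt struct {
	runqhead, runqtail *g // global queue endpoints
	runqsize           int
}

// drain mirrors the while(!empty) loop above: round-robin over the P's,
// moving one g per P per round from the local tail to the global head,
// until every local queue is empty.
func drain(allp []*p, s *schedt) {
	empty := false
	for !empty {
		empty = true
		for _, pp := range allp {
			if pp.runqhead == pp.runqtail {
				continue
			}
			empty = false
			// pop from tail of local queue
			pp.runqtail--
			gp := pp.runq[pp.runqtail%ringSize]
			// push onto head of global queue
			gp.schedlink = s.runqhead
			s.runqhead = gp
			if s.runqtail == nil {
				s.runqtail = gp
			}
			s.runqsize++
		}
	}
}

func main() {
	// Two P's with goroutines 1..3 and 4..6 enqueued locally in FIFO order.
	allp := []*p{new(p), new(p)}
	id := 1
	for _, pp := range allp {
		for j := 0; j < 3; j++ {
			pp.runq[pp.runqtail%ringSize] = &g{id: id}
			pp.runqtail++
			id++
		}
	}
	var s schedt
	drain(allp, &s)
	// Walking from the head, the end the scheduler pops from, prints
	// 4 1 5 2 6 3: the queues interleave, but each P's goroutines keep
	// their FIFO order, which is the fairness property the comment cites.
	for gp := s.runqhead; gp != nil; gp = gp.schedlink {
		fmt.Print(gp.id, " ")
	}
	fmt.Println()
}
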
+func TestGCFairness(t *testing.T) {
+ output := executeTest(t, testGCFairnessSource, nil)
+ want := "OK\n"
+ if output != want {
+ t.Fatalf("want %s, got %s\n", want, output)
+ }
+}
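
For context, executeTest is a helper defined elsewhere in the runtime's test
files: it compiles the quoted source into a standalone program, runs it, and
hands back the output. Here is a rough sketch of that pattern, assuming only
that behavior; runSource and everything inside it are hypothetical
illustrations, not the real helper.

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// runSource writes a standalone program into a temp directory, then builds
// and runs it with "go run", returning everything it printed.
func runSource(source string) (string, error) {
	dir, err := os.MkdirTemp("", "fairness-test")
	if err != nil {
		return "", err
	}
	defer os.RemoveAll(dir)

	src := filepath.Join(dir, "main.go")
	if err := os.WriteFile(src, []byte(source), 0644); err != nil {
		return "", err
	}
	out, err := exec.Command("go", "run", src).CombinedOutput()
	return string(out), err
}

func main() {
	out, err := runSource("package main\n\nfunc main() { println(\"hi\") }\n")
	fmt.Println(out, err)
}
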
+
+const testGCFairnessSource = `
+package main
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "time"
+)
+
+func main() {
+ runtime.GOMAXPROCS(1)
+ f, err := os.Open("/dev/null")
+ if os.IsNotExist(err) {
+ // This test tests what it is intended to test only if writes are fast.
+ // If there is no /dev/null, we just don't execute the test.
+ fmt.Println("OK\n")
+ return
+ }
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ for i := 0; i < 2; i++ {
+ go func() {
+ for {
+ f.Write([]byte("."))
+ }
+ }()
+ }
+ time.Sleep(10 * time.Millisecond)
+ fmt.Println("OK")
+}
+`
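
Why the quoted program provokes the bug: every f.Write([]byte(".")) allocates
a fresh one-byte slice, so the two spinning goroutines generate garbage fast
enough to force very frequent collections, and with GOMAXPROCS(1) there is
only a single P to share. If each stop-the-world pass reshuffles the run
queues out of FIFO order, the main goroutine waking from its 10-millisecond
sleep can be starved indefinitely and the final "OK" never prints, so a
broken scheduler shows up as a hang or timeout rather than wrong output.
That, at least, is the reading suggested by the FIFO comment and the issue
link in the scheduler change above.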
+
func stackGrowthRecursive(i int) {
var pad [128]uint64
if i != 0 && pad[0] == 0 {