From 5203357ebacf9f41ca5e194d953c164049172e96 Mon Sep 17 00:00:00 2001
From: Matthew Dempsky
Date: Fri, 7 May 2021 22:51:29 -0700
Subject: [PATCH] cmd/compile: make non-concurrent compiles deterministic again

Spreading function compilation across multiple goroutines results in
non-deterministic output. This is how cmd/compile has historically
behaved for concurrent builds, but it is troublesome for non-concurrent
builds, particularly because it interferes with "toolstash -cmp".

I spent some time trying to think of a simple, unified algorithm that
schedules work concurrently yet gracefully degrades to deterministic
output for single-worker builds, but I couldn't come up with one. The
simplest idea I found was to abstract away the operation of scheduling
work so that we can have alternative deterministic and concurrent
modes.

Change-Id: I08afa00527ce1844432412f4f8553781c4e323df
Reviewed-on: https://go-review.googlesource.com/c/go/+/318229
Trust: Matthew Dempsky
Run-TryBot: Matthew Dempsky
TryBot-Result: Go Bot
Reviewed-by: Cuong Manh Le
---
 src/cmd/compile/internal/gc/compile.go | 59 ++++++++++++++++----------
 1 file changed, 36 insertions(+), 23 deletions(-)

diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go
index a7380510d1..00504451a8 100644
--- a/src/cmd/compile/internal/gc/compile.go
+++ b/src/cmd/compile/internal/gc/compile.go
@@ -119,38 +119,51 @@ func compileFunctions() {
 		})
 	}
 
-	// We queue up a goroutine per function that needs to be
-	// compiled, but require them to grab an available worker ID
-	// before doing any substantial work to limit parallelism.
-	workerIDs := make(chan int, base.Flag.LowerC)
-	for i := 0; i < base.Flag.LowerC; i++ {
-		workerIDs <- i
+	// By default, we perform work right away on the current goroutine
+	// as the solo worker.
+	queue := func(work func(int)) {
+		work(0)
+	}
+
+	if nWorkers := base.Flag.LowerC; nWorkers > 1 {
+		// For concurrent builds, we create a goroutine per task, but
+		// require them to hold a unique worker ID while performing work
+		// to limit parallelism.
+		workerIDs := make(chan int, nWorkers)
+		for i := 0; i < nWorkers; i++ {
+			workerIDs <- i
+		}
+
+		queue = func(work func(int)) {
+			go func() {
+				worker := <-workerIDs
+				work(worker)
+				workerIDs <- worker
+			}()
+		}
 	}
 
 	var wg sync.WaitGroup
-	var asyncCompile func(*ir.Func)
-	asyncCompile = func(fn *ir.Func) {
-		wg.Add(1)
-		go func() {
-			worker := <-workerIDs
-			ssagen.Compile(fn, worker)
-			workerIDs <- worker
-
-			// Done compiling fn. Schedule it's closures for compilation.
-			for _, closure := range fn.Closures {
-				asyncCompile(closure)
-			}
-			wg.Done()
-		}()
+	var compile func([]*ir.Func)
+	compile = func(fns []*ir.Func) {
+		wg.Add(len(fns))
+		for _, fn := range fns {
+			fn := fn
+			queue(func(worker int) {
+				ssagen.Compile(fn, worker)
+				compile(fn.Closures)
+				wg.Done()
+			})
+		}
 	}
 
 	types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
 	base.Ctxt.InParallel = true
-	for _, fn := range compilequeue {
-		asyncCompile(fn)
-	}
+
+	compile(compilequeue)
 	compilequeue = nil
 	wg.Wait()
+
 	base.Ctxt.InParallel = false
 	types.CalcSizeDisabled = false
 }
-- 
2.50.0
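
Note (not part of the CL above): the standalone sketch below illustrates the
scheduling abstraction the patch introduces, outside the compiler. A "queue"
function either runs work immediately on the current goroutine (deterministic,
single worker) or hands it to a goroutine that must hold a worker ID drawn
from a buffered channel (concurrent, bounded parallelism). The package layout,
the run() helper, and the toy print tasks are illustrative assumptions, not
code from the CL; only the queue/workerIDs structure mirrors the patch.

package main

import (
	"fmt"
	"sync"
)

func main() {
	run(1) // single worker: tasks run inline, in queue order -> deterministic
	run(4) // four workers: tasks run concurrently, interleaving may vary
}

// run queues ten toy tasks using either the deterministic or the
// concurrent scheduling mode, depending on nWorkers.
func run(nWorkers int) {
	fmt.Printf("--- %d worker(s) ---\n", nWorkers)

	// By default, perform work right away on the current goroutine
	// as the solo worker (worker ID 0).
	queue := func(work func(int)) {
		work(0)
	}

	var wg sync.WaitGroup

	if nWorkers > 1 {
		// For concurrent runs, spawn a goroutine per task, but require
		// each goroutine to hold a unique worker ID while working,
		// which bounds parallelism to nWorkers.
		workerIDs := make(chan int, nWorkers)
		for i := 0; i < nWorkers; i++ {
			workerIDs <- i
		}
		queue = func(work func(int)) {
			go func() {
				worker := <-workerIDs
				work(worker)
				workerIDs <- worker
			}()
		}
	}

	// Queue the tasks; wg tracks completion in both modes.
	for i := 0; i < 10; i++ {
		i := i // capture the loop variable for the closure
		wg.Add(1)
		queue(func(worker int) {
			fmt.Printf("task %d ran on worker %d\n", i, worker)
			wg.Done()
		})
	}
	wg.Wait()
}

With one worker the tasks print in submission order on every run; with four
workers the interleaving changes from run to run. That is the same trade-off
the CL makes: non-determinism is confined to concurrent builds, while -c=1
builds get stable output again.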