From: Michael Pratt
Date: Mon, 25 Jul 2022 19:31:03 +0000 (-0400)
Subject: runtime: convert schedt.gcwaiting to atomic type
X-Git-Tag: go1.20rc1~1665
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=88ef50e6623e55875d783e5715be1dc0683717e0;p=gostls13.git

runtime: convert schedt.gcwaiting to atomic type

Note that this replaces numerous unsynchronized loads throughout the
scheduler.

For #53821.

Change-Id: Ica80b04c9e8c184bfef186e549526fc3f117c387
Reviewed-on: https://go-review.googlesource.com/c/go/+/419447
Run-TryBot: Michael Pratt
Reviewed-by: Austin Clements
TryBot-Result: Gopher Robot
---

diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 5362ff0132..c38b725d4b 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -1063,7 +1063,7 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 	// Drain root marking jobs.
 	if work.markrootNext < work.markrootJobs {
 		// Stop if we're preemptible or if someone wants to STW.
-		for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
+		for !(gp.preempt && (preemptible || sched.gcwaiting.Load())) {
 			job := atomic.Xadd(&work.markrootNext, +1) - 1
 			if job >= work.markrootJobs {
 				break
@@ -1077,7 +1077,7 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 
 	// Drain heap marking jobs.
 	// Stop if we're preemptible or if someone wants to STW.
-	for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
+	for !(gp.preempt && (preemptible || sched.gcwaiting.Load())) {
 		// Try to keep work available on the global queue. We used to
 		// check if there were waiting workers, but it's better to
 		// just keep work available than to make workers wait. In the
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index a7d60a024a..8c1865351a 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -880,7 +880,7 @@ func freezetheworld() {
 	for i := 0; i < 5; i++ {
 		// this should tell the scheduler to not start any new goroutines
 		sched.stopwait = freezeStopWait
-		atomic.Store(&sched.gcwaiting, 1)
+		sched.gcwaiting.Store(true)
 		// this should stop running goroutines
 		if !preemptall() {
 			break // no running goroutines
@@ -1186,7 +1186,7 @@ func stopTheWorldWithSema() {
 
 	lock(&sched.lock)
 	sched.stopwait = gomaxprocs
-	atomic.Store(&sched.gcwaiting, 1)
+	sched.gcwaiting.Store(true)
 	preemptall()
 	// stop current P
 	gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
@@ -1270,7 +1270,7 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 {
 		newprocs = 0
 	}
 	p1 := procresize(procs)
-	sched.gcwaiting = 0
+	sched.gcwaiting.Store(false)
 	if sched.sysmonwait != 0 {
 		sched.sysmonwait = 0
 		notewakeup(&sched.sysmonnote)
@@ -2367,7 +2367,7 @@ func handoffp(pp *p) {
 		return
 	}
 	lock(&sched.lock)
-	if sched.gcwaiting != 0 {
+	if sched.gcwaiting.Load() {
 		pp.status = _Pgcstop
 		sched.stopwait--
 		if sched.stopwait == 0 {
@@ -2471,7 +2471,7 @@ func startlockedm(gp *g) {
 func gcstopm() {
 	gp := getg()
 
-	if sched.gcwaiting == 0 {
+	if !sched.gcwaiting.Load() {
 		throw("gcstopm: not waiting for gc")
 	}
 	if gp.m.spinning {
@@ -2555,7 +2555,7 @@ func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
 
 top:
 	pp := mp.p.ptr()
-	if sched.gcwaiting != 0 {
+	if sched.gcwaiting.Load() {
 		gcstopm()
 		goto top
 	}
@@ -2719,7 +2719,7 @@ top:
 
 	// return P and block
 	lock(&sched.lock)
-	if sched.gcwaiting != 0 || pp.runSafePointFn != 0 {
+	if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
 		unlock(&sched.lock)
 		goto top
 	}
@@ -2901,7 +2901,7 @@ func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWo
 		stealTimersOrRunNextG := i == stealTries-1
 
 		for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
-			if sched.gcwaiting != 0 {
+			if sched.gcwaiting.Load() {
 				// GC work may be available.
 				return nil, false, now, pollUntil, true
 			}
@@ -3650,7 +3650,7 @@ func reentersyscall(pc, sp uintptr) {
 	gp.m.oldp.set(pp)
 	gp.m.p = 0
 	atomic.Store(&pp.status, _Psyscall)
-	if sched.gcwaiting != 0 {
+	if sched.gcwaiting.Load() {
 		systemstack(entersyscall_gcwait)
 		save(pc, sp)
 	}
@@ -5155,9 +5155,9 @@ func sysmon() {
 		// from a timer to avoid adding system load to applications that spend
 		// most of their time sleeping.
 		now := nanotime()
-		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || sched.npidle.Load() == gomaxprocs) {
+		if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
 			lock(&sched.lock)
-			if atomic.Load(&sched.gcwaiting) != 0 || sched.npidle.Load() == gomaxprocs {
+			if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
 				syscallWake := false
 				next := timeSleepUntil()
 				if next > now {
@@ -5410,7 +5410,7 @@ func schedtrace(detailed bool) {
 	lock(&sched.lock)
 	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
 	if detailed {
-		print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
+		print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
 	}
 	// We must be careful while reading data from P's, M's and G's.
 	// Even if we hold schedlock, most data can be changed concurrently.
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index bf1b53cb12..ed618dff05 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -817,7 +817,7 @@ type schedt struct {
 	// m.exited is set. Linked through m.freelink.
 	freem *m
 
-	gcwaiting  uint32 // gc is waiting to run
+	gcwaiting  atomic.Bool // gc is waiting to run
 	stopwait   int32
 	stopnote   note
 	sysmonwait uint32
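
For readers who want to apply the same cleanup outside the runtime: the diff replaces a bare uint32 flag, accessed through a mix of plain reads and hand-rolled atomic.Load/atomic.Store calls, with a typed atomic.Bool whose methods make every access synchronized by construction. The runtime uses its internal runtime/internal/atomic package; the sketch below reproduces the before/after shape with the exported sync/atomic.Bool (available since Go 1.19). The schedBefore/schedAfter types and method names are illustrative only, not taken from the runtime.

package main

import (
	"fmt"
	"sync/atomic"
)

// Before: a bare uint32 flag. Callers must remember to go through
// atomic.LoadUint32/StoreUint32; a plain read like `s.gcwaiting != 0`
// compiles fine but is an unsynchronized load.
type schedBefore struct {
	gcwaiting uint32 // gc is waiting to run (0 or 1)
}

func (s *schedBefore) setWaiting(v bool) {
	var u uint32
	if v {
		u = 1
	}
	atomic.StoreUint32(&s.gcwaiting, u)
}

func (s *schedBefore) waiting() bool {
	return atomic.LoadUint32(&s.gcwaiting) != 0
}

// After: the typed atomic.Bool only exposes synchronized operations,
// so the unsynchronized-read mistake can no longer be written.
type schedAfter struct {
	gcwaiting atomic.Bool // gc is waiting to run
}

func (s *schedAfter) setWaiting(v bool) { s.gcwaiting.Store(v) }
func (s *schedAfter) waiting() bool     { return s.gcwaiting.Load() }

func main() {
	var s schedAfter
	s.setWaiting(true)
	fmt.Println(s.waiting()) // true
}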