// enlistWorker encourages another dedicated mark worker to start on
// another P if there are spare worker slots. It is used by putfull
// when more work is made available.
//
//go:nowritebarrier
func (c *gcControllerState) enlistWorker() {
+ // If there are idle Ps, wake one so it will run an idle worker.
+ if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
+ wakep()
+ return
+ }
+
+ // There are no idle Ps. If we need more dedicated workers,
+ // try to preempt a running P so it will switch to a worker.
if c.dedicatedMarkWorkersNeeded <= 0 {
return
}
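
Aside: the fast path added above follows the scheduler's usual wakeup
discipline: wake an M only when an idle P exists and no M is already
spinning, so a burst of new work causes at most one wakeup. A minimal
runnable sketch of that check, with idle and spinning as hypothetical
stand-ins for sched.npidle and sched.nmspinning:

package main

import (
	"fmt"
	"sync/atomic"
)

var (
	idle     int32 = 1 // hypothetical stand-in for sched.npidle
	spinning int32     // hypothetical stand-in for sched.nmspinning
)

// enlist wakes at most one worker per burst of new work.
func enlist() {
	if atomic.LoadInt32(&idle) != 0 && atomic.LoadInt32(&spinning) == 0 {
		fmt.Println("wake one idle worker")
		return
	}
	fmt.Println("no idle capacity; fall back to preempting a running P")
}

func main() {
	enlist()                        // wakes a worker
	atomic.StoreInt32(&spinning, 1) // an M is already spinning
	enlist()                        // takes the fallback path
}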
// put enqueues a pointer for the garbage collector to trace.
// obj must point to the beginning of a heap object or an oblet.
//go:nowritebarrier
func (w *gcWork) put(obj uintptr) {
+ flushed := false
wbuf := w.wbuf1.ptr()
if wbuf == nil {
w.init()
wbuf = w.wbuf1.ptr()
// wbuf is empty at this point.
} else if wbuf.nobj == len(wbuf.obj) {
w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
wbuf = w.wbuf1.ptr()
if wbuf.nobj == len(wbuf.obj) {
putfull(wbuf)
wbuf = getempty()
w.wbuf1 = wbufptrOf(wbuf)
+ flushed = true
}
}
wbuf.obj[wbuf.nobj] = obj
wbuf.nobj++
+
+ // If we put a buffer on full, let the GC controller know so
+ // it can encourage more workers to run. We delay this until
+ // the end of put so that w is in a consistent state, since
+ // enlistWorker may itself manipulate w.
+ if flushed && gcphase == _GCmark {
+ gcController.enlistWorker()
+ }
}
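
The delayed notification above is a re-entrancy precaution worth naming:
finish mutating the local state, then call out, because the callee may
call back into the same structure. A self-contained sketch of the
pattern; queue and notify are invented names, not runtime APIs:

package main

import "fmt"

type queue struct {
	buf []int
	cap int
}

func notify() { fmt.Println("more work available; enlist a worker") }

// put flushes a full buffer first, records that it did, and only
// notifies once q is consistent again, mirroring gcWork.put above.
func (q *queue) put(v int) {
	flushed := false
	if len(q.buf) == q.cap {
		fmt.Println("flush full buffer to the shared list")
		q.buf = q.buf[:0]
		flushed = true
	}
	q.buf = append(q.buf, v) // q is consistent from here on
	if flushed {
		notify() // safe even if notify calls back into q.put
	}
}

func main() {
	q := &queue{cap: 2}
	for i := 0; i < 5; i++ {
		q.put(i)
	}
}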
// putFast does a put and returns true if it can be done quickly
// otherwise it returns false and the caller needs to call put.

// balance moves some work that's cached in this gcWork back on the
// global queue.
//go:nowritebarrierrec
func (w *gcWork) balance() {
if w.wbuf1 == 0 {
return
}
if wbuf := w.wbuf2.ptr(); wbuf.nobj != 0 {
putfull(wbuf)
w.wbuf2 = wbufptrOf(getempty())
} else if wbuf := w.wbuf1.ptr(); wbuf.nobj > 4 {
w.wbuf1 = wbufptrOf(handoff(wbuf))
+ } else {
+ return
+ }
+ // We flushed a buffer to the full list, so wake a worker.
+ if gcphase == _GCmark {
+ gcController.enlistWorker()
}
}
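
balance's middle branch leans on handoff, which splits a locally cached
buffer so other workers can take part of it instead of waiting for all
of it. A conceptual sketch only, with invented names; the real handoff
moves half the pointers into a fresh workbuf and publishes the rest via
putfull:

package main

import "fmt"

// handoffSketch keeps half of a local buffer and publishes the other
// half for idle workers to pick up.
func handoffSketch(local []int) (keep, publish []int) {
	n := len(local) / 2
	publish = append([]int(nil), local[:n]...)
	keep = append([]int(nil), local[n:]...)
	return keep, publish
}

func main() {
	keep, publish := handoffSketch([]int{1, 2, 3, 4, 5, 6})
	fmt.Println("keep:", keep, "publish:", publish)
}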
// putfull puts the workbuf on the work.full list for the GC.
// putfull accepts partially full buffers so the GC can avoid competing
// with the mutators for ownership of partially full buffers.
//go:nowritebarrier
func putfull(b *workbuf) {
b.checknonempty()
lfstackpush(&work.full, &b.node)
-
- // We just made more work available. Let the GC controller
- // know so it can encourage more workers to run.
- if gcphase == _GCmark {
- gcController.enlistWorker()
- }
}
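
With the enlistWorker call gone, putfull is again just a non-empty check
plus a push onto the lock-free work.full stack. For intuition, a
simplified Treiber-stack sketch of that push and pop; the runtime's
lfstack additionally packs a version counter into the pointer to defeat
ABA, which this sketch omits:

package main

import (
	"fmt"
	"sync/atomic"
)

type node struct {
	val  int
	next *node
}

var top atomic.Pointer[node] // stand-in for work.full

// push retries its CAS until it installs n as the new head.
func push(n *node) {
	for {
		old := top.Load()
		n.next = old
		if top.CompareAndSwap(old, n) {
			return
		}
	}
}

// pop removes and returns the head, or nil if the stack is empty.
func pop() *node {
	for {
		old := top.Load()
		if old == nil {
			return nil
		}
		if top.CompareAndSwap(old, old.next) {
			return old
		}
	}
}

func main() {
	push(&node{val: 1})
	push(&node{val: 2})
	fmt.Println(pop().val, pop().val) // 2 1
}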
// trygetfull tries to get a full or partially empty workbuffer.

// In findrunnable (proc.go), after the final run-queue re-check comes
// up empty:
}
}
+ // Check for idle-priority GC work again.
+ if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
+ lock(&sched.lock)
+ _p_ = pidleget()
+ if _p_ != nil && _p_.gcBgMarkWorker == 0 {
+ pidleput(_p_)
+ _p_ = nil
+ }
+ unlock(&sched.lock)
+ if _p_ != nil {
+ acquirep(_p_)
+ if wasSpinning {
+ _g_.m.spinning = true
+ atomic.Xadd(&sched.nmspinning, 1)
+ }
+ // Go back to idle GC check.
+ goto stop
+ }
+ }
+
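
The block above has a take-or-put-back shape: inside one sched.lock
critical section, grab an idle P but immediately return it if it cannot
help (here, if it has no gcBgMarkWorker goroutine attached). A small
sketch of that shape with an invented resource pool:

package main

import (
	"fmt"
	"sync"
)

type resource struct {
	id     int
	usable bool // stand-in for the gcBgMarkWorker check
}

var (
	mu   sync.Mutex
	pool []*resource
)

// takeUsable pops an idle resource under the lock, putting it straight
// back if it cannot help, just as the code above calls pidleput.
func takeUsable() *resource {
	mu.Lock()
	defer mu.Unlock()
	if len(pool) == 0 {
		return nil
	}
	r := pool[len(pool)-1]
	pool = pool[:len(pool)-1]
	if !r.usable {
		pool = append(pool, r)
		return nil
	}
	return r
}

func main() {
	pool = []*resource{{id: 1, usable: false}, {id: 2, usable: true}}
	if r := takeUsable(); r != nil {
		fmt.Println("acquired resource", r.id)
	} else {
		fmt.Println("no usable idle resource")
	}
}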
// poll network
if netpollinited() && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
if _g_.m.p != 0 {