ndone uint32
alldone note
- // helperDrainBlock indicates that GC mark termination helpers
- // should pass gcDrainBlock to gcDrain to block in the
- // getfull() barrier. Otherwise, they should pass gcDrainNoBlock.
- //
- // TODO: This is a temporary fallback to work around races
- // that cause early mark termination.
- helperDrainBlock bool
-
// Number of roots of various root types. Set by gcMarkRootPrepare.
nFlushCacheRoots int
nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
gcResetMarkState()
initCheckmarks()
gcw := &getg().m.p.ptr().gcw
- gcDrain(gcw, gcDrainNoBlock)
+ gcDrain(gcw, 0)
wbBufFlush1(getg().m.p.ptr())
gcw.dispose()
clearCheckmarks()
}
// Go back to draining, this time
// without preemption.
- gcDrain(&_p_.gcw, gcDrainNoBlock|gcDrainFlushBgCredit)
+ gcDrain(&_p_.gcw, gcDrainFlushBgCredit)
case gcMarkWorkerFractionalMode:
gcDrain(&_p_.gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
case gcMarkWorkerIdleMode:
work.nwait = 0
work.ndone = 0
work.nproc = uint32(gcprocs())
- work.helperDrainBlock = false
// Check that there's no marking work remaining.
if work.full != 0 || work.nDataRoots+work.nBSSRoots+work.nSpanRoots+work.nStackRoots != 0 {
gchelperstart()
gcw := &getg().m.p.ptr().gcw
- if work.helperDrainBlock {
- gcDrain(gcw, gcDrainBlock)
- } else {
- gcDrain(gcw, gcDrainNoBlock)
- }
+ gcDrain(gcw, 0)
if debug.gccheckmark > 0 {
// This is expensive when there's a large number of
// Parallel mark over GC roots and heap
if gcphase == _GCmarktermination {
gcw := &_g_.m.p.ptr().gcw
- if work.helperDrainBlock {
- gcDrain(gcw, gcDrainBlock) // blocks in getfull
- } else {
- gcDrain(gcw, gcDrainNoBlock)
- }
+ gcDrain(gcw, 0)
}
nproc := atomic.Load(&work.nproc) // work.nproc can change right after we increment work.ndone
const (
gcDrainUntilPreempt gcDrainFlags = 1 << iota
- gcDrainNoBlock
gcDrainFlushBgCredit
gcDrainIdle
gcDrainFractional
-
- // gcDrainBlock means neither gcDrainUntilPreempt or
- // gcDrainNoBlock. It is the default, but callers should use
- // the constant for documentation purposes.
- gcDrainBlock gcDrainFlags = 0
)
// gcDrain scans roots and objects in work buffers, blackening grey
-// objects until all roots and work buffers have been drained.
+// objects until it is unable to get more work. It may return before
+// GC is done; it's the caller's responsibility to balance work from
+// other Ps.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
-// is set. This implies gcDrainNoBlock.
+// is set.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
-// to do. This implies gcDrainNoBlock.
+// to do.
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
-// pollFractionalWorkerExit() returns true. This implies
-// gcDrainNoBlock.
+// pollFractionalWorkerExit() returns true.
//
-// If flags&gcDrainNoBlock != 0, gcDrain returns as soon as it is
-// unable to get more work. Otherwise, it will block until all
-// blocking calls are blocked in gcDrain.
-//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
gp := getg().m.curg
preemptible := flags&gcDrainUntilPreempt != 0
- blocking := flags&(gcDrainUntilPreempt|gcDrainIdle|gcDrainFractional|gcDrainNoBlock) == 0
flushBgCredit := flags&gcDrainFlushBgCredit != 0
idle := flags&gcDrainIdle != 0
gcw.balance()
}
- var b uintptr
- if blocking {
- b = gcw.get()
- } else {
- b = gcw.tryGetFast()
+ b := gcw.tryGetFast()
+ if b == 0 {
+ b = gcw.tryGet()
if b == 0 {
+ // Flush the write barrier
+ // buffer; this may create
+ // more work.
+ wbBufFlush(nil, 0)
b = gcw.tryGet()
- if b == 0 {
- // Flush the write barrier
- // buffer; this may create
- // more work.
- wbBufFlush(nil, 0)
- b = gcw.tryGet()
- }
}
}
if b == 0 {
- // work barrier reached or tryGet failed.
+ // Unable to get work.
break
}
scanobject(b, gcw)
}
}
- // In blocking mode, write barriers are not allowed after this
- // point because we must preserve the condition that the work
- // buffers are empty.
-
done:
// Flush remaining scan work credit.
if gcw.scanWork > 0 {
//
// (preemption must be disabled)
// gcw := &getg().m.p.ptr().gcw
-// .. call gcw.put() to produce and gcw.get() to consume ..
+// .. call gcw.put() to produce and gcw.tryGet() to consume ..
//
// It's important that any use of gcWork during the mark phase prevent
// the garbage collector from transitioning to mark termination since
return wbuf.obj[wbuf.nobj]
}
-// get dequeues a pointer for the garbage collector to trace, blocking
-// if necessary to ensure all pointers from all queues and caches have
-// been retrieved. get returns 0 if there are no pointers remaining.
-//go:nowritebarrierrec
-func (w *gcWork) get() uintptr {
- wbuf := w.wbuf1
- if wbuf == nil {
- w.init()
- wbuf = w.wbuf1
- // wbuf is empty at this point.
- }
- if wbuf.nobj == 0 {
- w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
- wbuf = w.wbuf1
- if wbuf.nobj == 0 {
- owbuf := wbuf
- wbuf = getfull()
- if wbuf == nil {
- return 0
- }
- putempty(owbuf)
- w.wbuf1 = wbuf
- }
- }
-
- // TODO: This might be a good place to add prefetch code
-
- wbuf.nobj--
- return wbuf.obj[wbuf.nobj]
-}
-
// dispose returns any cached pointers to the global queue.
// The buffers are being put on the full queue so that the
// write barriers will not simply reacquire them before the
return b
}
-// Get a full work buffer off the work.full list.
-// If nothing is available wait until all the other gc helpers have
-// finished and then return nil.
-// getfull acts as a barrier for work.nproc helpers. As long as one
-// gchelper is actively marking objects it
-// may create a workbuffer that the other helpers can work on.
-// The for loop either exits when a work buffer is found
-// or when _all_ of the work.nproc GC helpers are in the loop
-// looking for work and thus not capable of creating new work.
-// This is in fact the termination condition for the STW mark
-// phase.
-//go:nowritebarrier
-func getfull() *workbuf {
- b := (*workbuf)(work.full.pop())
- if b != nil {
- b.checknonempty()
- return b
- }
-
- incnwait := atomic.Xadd(&work.nwait, +1)
- if incnwait > work.nproc {
- println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
- throw("work.nwait > work.nproc")
- }
- for i := 0; ; i++ {
- if work.full != 0 {
- decnwait := atomic.Xadd(&work.nwait, -1)
- if decnwait == work.nproc {
- println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
- throw("work.nwait > work.nproc")
- }
- b = (*workbuf)(work.full.pop())
- if b != nil {
- b.checknonempty()
- return b
- }
- incnwait := atomic.Xadd(&work.nwait, +1)
- if incnwait > work.nproc {
- println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
- throw("work.nwait > work.nproc")
- }
- }
- if work.nwait == work.nproc && work.markrootNext >= work.markrootJobs {
- return nil
- }
- if i < 10 {
- procyield(20)
- } else if i < 20 {
- osyield()
- } else {
- usleep(100)
- }
- }
-}
-
//go:nowritebarrier
func handoff(b *workbuf) *workbuf {
// Make new buffer with half of b's pointers.