nwait uint32
// Number of roots of various root types. Set by gcMarkRootPrepare.
+ //
+ // nStackRoots == len(stackRoots), but we keep nStackRoots for
+ // consistency with the other root counts.
nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
// Base indexes of each root type. Set by gcMarkRootPrepare.
baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32
+ // stackRoots is a snapshot of all of the Gs that existed
+ // before the beginning of concurrent marking. The backing
+ // store of this slice must not be modified because it may be
+ // shared with allgs.
+ stackRoots []*g
+
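The "must not be modified" caveat above exists because a Go slice is only a small header (pointer, length, capacity) over a backing array, and copying the header does not copy the array. A minimal standalone sketch of the aliasing this guards against (illustrative names and element type, not runtime code):

    package main

    import "fmt"

    func main() {
        all := []int{1, 2, 3, 4}

        // Taking a snapshot copies only the slice header; both
        // headers still point at the same backing array.
        snapshot := all[:len(all):len(all)]

        // A write through either header is visible through the
        // other. This is why work.stackRoots must be treated as
        // read-only: writing through it could corrupt allgs.
        snapshot[0] = 99
        fmt.Println(all[0]) // 99
    }
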
// Each type of GC state transition is protected by a lock.
// Since multiple threads can simultaneously detect the state
// transition condition, any thread that detects a transition
throw("work.full != 0")
}
+ // Drop allg snapshot. allgs may have grown, in which case
+ // this is the only reference to the old backing store and
+ // there's no need to keep it around.
+ work.stackRoots = nil
+
// Clear out buffers and double-check that all gcWork caches
// are empty. This should be ensured by gcMarkDone before we
// enter mark termination.
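Nil-ing work.stackRoots above is not just tidiness: once allgs has grown past its old capacity, the snapshot and allgs no longer share storage, and the snapshot is the only reference keeping the old backing array alive. A rough standalone sketch of that lifecycle (illustrative names, not runtime code):

    package main

    import "fmt"

    func main() {
        all := make([]int, 2, 2) // full: the next append must reallocate
        snapshot := all[:len(all):len(all)]

        // Growing all past its capacity moves it to a new, larger
        // backing array; snapshot still references the old one.
        all = append(all, 3)
        all[0] = 99
        fmt.Println(snapshot[0]) // 0: the old array, untouched

        // snapshot is now the only reference to the old array.
        // Dropping it, as gcMarkTermination does with
        // work.stackRoots = nil, lets the GC reclaim that memory.
        snapshot = nil
    }
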
// ignore them because they begin life without any roots, so
// there's nothing to scan, and any roots they create during
// the concurrent phase will be caught by the write barrier.
- work.nStackRoots = int(atomic.Loaduintptr(&allglen))
+ work.stackRoots = allGsSnapshot()
+ work.nStackRoots = len(work.stackRoots)
work.markrootNext = 0
work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
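For orientation, markrootJobs defines a flat index space that gcMarkRootPrepare carves up by setting the base* fields cumulatively: fixed roots first, then data, BSS, span, and stack roots, with baseEnd landing exactly at markrootJobs. A hedged sketch of that layout with made-up counts (standalone; the real values are computed at mark setup):

    package main

    import "fmt"

    func main() {
        // Illustrative counts only; the runtime computes these
        // in gcMarkRootPrepare.
        const fixedRootCount = 3
        nDataRoots, nBSSRoots, nSpanRoots, nStackRoots := 2, 1, 4, 5

        baseData := uint32(fixedRootCount)
        baseBSS := baseData + uint32(nDataRoots)
        baseSpans := baseBSS + uint32(nBSSRoots)
        baseStacks := baseSpans + uint32(nSpanRoots)
        baseEnd := baseStacks + uint32(nStackRoots)

        markrootJobs := uint32(fixedRootCount + nDataRoots +
            nBSSRoots + nSpanRoots + nStackRoots)

        // Each job index falls into exactly one half-open range;
        // stack roots occupy [baseStacks, baseEnd).
        fmt.Println(markrootJobs == baseEnd) // true
        fmt.Println(baseStacks, baseEnd)     // 10 15
    }
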
default:
// the rest is scanning goroutine stacks
workCounter = &gcController.stackScanWork
- var gp *g
- if work.baseStacks <= i && i < work.baseEnd {
- // N.B. Atomic read of allglen in gcMarkRootPrepare
- // acts as a barrier to ensure that allgs must be large
- // enough to contain all relevant Gs.
- gp = allgs[i-work.baseStacks]
- } else {
+ if i < work.baseStacks || work.baseEnd <= i {
+ printlock()
+ print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
throw("markroot: bad index")
}
+ gp := work.stackRoots[i-work.baseStacks]
// remember when we've first observed the G blocked
// needed only to output in traceback
unlock(&allglock)
}
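On the consuming side, the new markroot path above is a pure bounds check plus an offset: any job index in [baseStacks, baseEnd) selects the G at position i-baseStacks in the snapshot, with no read of allgs or allglen at all. A minimal sketch of the lookup pattern (stackRootOf and its variables are illustrative, not runtime API):

    package main

    import "fmt"

    // Illustrative stand-ins for work.baseStacks, work.baseEnd,
    // and work.stackRoots.
    var (
        baseStacks uint32 = 10
        baseEnd    uint32 = 15
        stackRoots        = []string{"g1", "g2", "g3", "g4", "g5"}
    )

    // stackRootOf mirrors markroot's dispatch: reject indexes
    // outside [baseStacks, baseEnd), then offset into the snapshot.
    func stackRootOf(i uint32) string {
        if i < baseStacks || baseEnd <= i {
            panic(fmt.Sprintf("bad index %d, want [%d, %d)", i, baseStacks, baseEnd))
        }
        return stackRoots[i-baseStacks]
    }

    func main() {
        fmt.Println(stackRootOf(10)) // g1
        fmt.Println(stackRootOf(14)) // g5
    }
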
+// allGsSnapshot returns a snapshot of the slice of all Gs.
+//
+// The world must be stopped or allglock must be held.
+func allGsSnapshot() []*g {
+ assertWorldStoppedOrLockHeld(&allglock)
+
+ // Because the world is stopped or allglock is held, allgadd
+ // cannot happen concurrently with this. allgs grows
+ // monotonically and existing entries never change, so we can
+ // simply return a copy of the slice header. For added safety,
+ // we trim everything past len because that can still change.
+ return allgs[:len(allgs):len(allgs)]
+}
+
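The full (three-index) slice expression in allGsSnapshot is what makes the copied header safe to hand out: it caps the snapshot's capacity at its length, so any append through the snapshot is forced to reallocate rather than write into allgs' spare capacity, which is exactly the region "that can still change". A small standalone demonstration (illustrative names; plain ints instead of *g):

    package main

    import "fmt"

    func main() {
        all := make([]int, 2, 8) // spare capacity, like allgs mid-growth

        capped := all[:len(all):len(all)] // cap == len == 2
        plain := all[:len(all)]           // cap is still 8

        // Appending through the uncapped alias writes into all's
        // spare capacity, the region that allgadd may still fill.
        _ = append(plain, 1)

        // Appending through the capped snapshot must reallocate,
        // leaving all's backing array untouched.
        _ = append(capped, 2)

        fmt.Println(cap(capped), cap(plain)) // 2 8
        fmt.Println(all[:3][2])              // 1: plain's append hit the shared array
    }
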
// atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
func atomicAllG() (**g, uintptr) {
length := atomic.Loaduintptr(&allglen)