// reclaimed until the next GC cycle.
clearpools()
+ gcResetGState()
gcResetMarkState()
work.finalizersDone = false
gcController.endCycle()
} else {
- // For non-concurrent GC (mode != gcBackgroundMode)
- // The g stacks have not been scanned so clear g state
- // such that mark termination scans all stacks.
- gcResetGState()
-
t := nanotime()
tScan, tInstallWB, tMark, tMarkTerm = t, t, t, t
heapGoal = heap0
unlock(&mheap_.lock)
}
-// gcResetGState resets the GC state of all G's and returns the length
-// of allgs.
-func gcResetGState() (numgs int) {
+// gcResetGState resets the GC state of all G's. Any Gs created after
+// this will also be in this reset state.
+func gcResetGState() {
// This may be called during a concurrent phase, so make sure
// allgs doesn't change.
lock(&allglock)
gp.gcscanvalid = false // stack has not been scanned
gp.gcAssistBytes = 0
}
- numgs = len(allgs)
unlock(&allglock)
- return
}
// gcResetMarkState resets state prior to marking (concurrent or STW).
// runtime·restartg(mastergp) to make it Grunnable.
// At the bottom we will want to return this p back to the scheduler.
- // Prepare flag indicating that the scan has not been completed.
- local_allglen := gcResetGState()
+ // Snapshot of allglen. During concurrent scan, we just need
+ // to be consistent about how many markroot jobs we create and
+ // how many Gs we check. Gs may be created after this and
+ // they'll be scanned during mark termination. During mark
+ // termination, allglen isn't changing.
+ local_allglen := int(atomicloaduintptr(&allglen))
work.ndone = 0
useOneP := uint32(1) // For now do not do this in parallel.