const (
fixedRootFinalizers = iota
- fixedRootFlushCaches
fixedRootFreeGStacks
fixedRootCount
//
//go:nowritebarrier
func gcMarkRootPrepare() {
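+ // Flush-cache roots exist only during mark termination: one job per
+ // P (hence gomaxprocs of them), so each P's mcache can be flushed as
+ // an independent root job. As the old fixedRootFlushCaches comment
+ // noted, mcaches must not be flushed during the concurrent phase, so
+ // the count is zero otherwise.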
+ if gcphase == _GCmarktermination {
+ work.nFlushCacheRoots = int(gomaxprocs)
+ } else {
+ work.nFlushCacheRoots = 0
+ }
+
// Compute how many data and BSS root blocks there are.
nBlocks := func(bytes uintptr) int {
return int((bytes + rootBlockBytes - 1) / rootBlockBytes)
}
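+ // Root jobs are numbered densely from zero: the fixed roots first,
+ // then the flush-cache, data, BSS, span, stack, and rescan blocks,
+ // matching the base offsets computed in markroot below.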
work.markrootNext = 0
- work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots + work.nRescanRoots)
+ work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots + work.nRescanRoots)
}
// gcMarkRootCheck checks that all roots have been scanned. It is
func markroot(gcw *gcWork, i uint32) {
// TODO(austin): This is a bit ridiculous. Compute and store
// the bases in gcMarkRootPrepare instead of the counts.
- baseData := uint32(fixedRootCount)
+ baseFlushCache := uint32(fixedRootCount)
+ baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
baseBSS := baseData + uint32(work.nDataRoots)
baseSpans := baseBSS + uint32(work.nBSSRoots)
baseStacks := baseSpans + uint32(work.nSpanRoots)
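+ // Each base is the first job index of its block; job i belongs to
+ // the block whose half-open range [base, nextBase) contains it.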
// Note: if you add a case here, please also update heapdump.go:dumproots.
switch {
+ case baseFlushCache <= i && i < baseData:
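+ // The job index encodes a P: i-baseFlushCache is the index of the P
+ // whose mcache to flush. One job per P spreads the flushes across
+ // the workers instead of doing them all in a single root job.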
+ flushmcache(int(i - baseFlushCache))
+
case baseData <= i && i < baseBSS:
for datap := &firstmoduledata; datap != nil; datap = datap.next {
markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
}

case i == fixedRootFinalizers:
for fb := allfin; fb != nil; fb = fb.next {
scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), uintptr(fb.cnt)*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)
}
- case i == fixedRootFlushCaches:
- if gcphase == _GCmarktermination { // Do not flush mcaches during concurrent phase.
- flushallmcaches()
- }
-
case i == fixedRootFreeGStacks:
// Only do this once per GC cycle; preferably
// concurrently.