const (
fixedRootFinalizers = iota
fixedRootFlushCaches
+ fixedRootFreeGStacks
fixedRootCount
// rootBlockBytes is the number of bytes to scan per data or
// BSS root.
flushallmcaches()
}
+ case i == fixedRootFreeGStacks:
+ // Only do this once per GC cycle; preferably
+ // concurrently.
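+ // work.markrootDone is set once the first markroot
+ // pass of the cycle completes, so in a normal GC this
+ // runs during concurrent mark and is skipped at mark
+ // termination.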
+ if !work.markrootDone {
+ markrootFreeGStacks()
+ }
+
case baseSpans <= i && i < baseStacks:
// mark MSpan.specials
markrootSpans(gcw, int(i-baseSpans))
gp.waitsince = work.tstart
}
- if gcphase == _GCmarktermination && status == _Gdead {
- // Free gp's stack if necessary. Only do this
- // during mark termination because otherwise
- // _Gdead may be transient.
- shrinkstack(gp)
- }
-
if gcphase != _GCmarktermination && gp.startpc == gcBgMarkWorkerPC {
// GC background workers may be
// non-preemptible, so we may deadlock if we
scanblock(b, n, ptrmask, gcw)
}
+// markrootFreeGStacks frees stacks of dead Gs.
+//
+// This does not free stacks of dead Gs cached on Ps, but having a few
+// cached stacks around isn't a problem.
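+// (Dead Gs cached on a P keep their standard-size stacks until the
+// P's local gfree list is flushed to the global lists.)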
+//
+//TODO go:nowritebarrier
+func markrootFreeGStacks() {
+ // Take list of dead Gs with stacks.
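+ // Taking the whole list at once lets the stacks be
+ // freed without holding sched.gflock.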
+ lock(&sched.gflock)
+ list := sched.gfreeStack
+ sched.gfreeStack = nil
+ unlock(&sched.gflock)
+ if list == nil {
+ return
+ }
+
+ // Free stacks.
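+ // For a dead G, shrinkstack frees the entire stack
+ // rather than shrinking it. Track the tail so the
+ // list can be spliced back onto the free list in one
+ // step.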
+ tail := list
+ for gp := list; gp != nil; gp = gp.schedlink.ptr() {
+ shrinkstack(gp)
+ tail = gp
+ }
+
+ // Put Gs back on the free list.
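+ // Point our tail at the current gfreeNoStack head,
+ // then publish the detached list as the new head.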
+ lock(&sched.gflock)
+ tail.schedlink.set(sched.gfreeNoStack)
+ sched.gfreeNoStack = list
+ unlock(&sched.gflock)
+}
+
// markrootSpans marks roots for one shard of work.spans.
//
//go:nowritebarrier