}
// No sweep on the first cycle.
- mheap_.sweepdone = 1
+ mheap_.sweepDrained = 1
// Set a reasonable initial GC trigger.
memstats.triggerRatio = 7 / 8.0
// First, wait for sweeping to finish. (We know there are no
// more spans on the sweep queue, but we may be concurrently
// sweeping spans, so we have to wait.)
- for atomic.Load(&work.cycles) == n+1 && atomic.Load(&mheap_.sweepers) != 0 {
+ for atomic.Load(&work.cycles) == n+1 && !isSweepDone() {
Gosched()
}
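
Since spans may still be mid-sweep after the queue empties, the loop above yields with Gosched and re-checks rather than blocking. A minimal standalone sketch of this yield-and-recheck pattern; the done flag is a stand-in for the runtime's internal state, not its API:

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"time"
)

func main() {
	// done stands in for the sweep-completion condition polled above.
	var done uint32

	go func() {
		time.Sleep(10 * time.Millisecond) // simulate concurrent sweeping
		atomic.StoreUint32(&done, 1)
	}()

	// Yield instead of blocking: each iteration gives other goroutines
	// (the "sweepers") a chance to run, then re-checks the condition.
	for atomic.LoadUint32(&done) == 0 {
		runtime.Gosched()
	}
	fmt.Println("sweep finished")
}
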
lock(&mheap_.lock)
mheap_.sweepgen += 2
- mheap_.sweepdone = 0
+ mheap_.sweepDrained = 0
mheap_.pagesSwept = 0
mheap_.sweepArenas = mheap_.allArenas
mheap_.reclaimIndex = 0
// Decrement the number of active sweepers and if this is the
// last one, mark sweep as complete.
l.blocking = false
- if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
+ if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepDrained) != 0 {
l.sweepIsDone()
}
}
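
The decrement-and-test above is the classic "last one out" pattern: atomic.Xadd returns the new count, so exactly one sweeper observes zero and, provided the queue has drained, reports completion. A self-contained sketch of the same shape, with hypothetical names (sweepers, drained, sweepIsDone) standing in for the runtime's:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// Stand-ins for mheap_.sweepers and mheap_.sweepDrained.
var (
	sweepers int32
	drained  uint32
)

func sweepIsDone() { fmt.Println("all spans swept") }

func worker(wg *sync.WaitGroup) {
	defer wg.Done()
	// ... sweep spans until the queue is empty ...
	atomic.StoreUint32(&drained, 1)
	// The worker that drops the count to zero after the queue has
	// drained announces completion, exactly once.
	if atomic.AddInt32(&sweepers, -1) == 0 && atomic.LoadUint32(&drained) != 0 {
		sweepIsDone()
	}
}

func main() {
	const n = 4
	atomic.StoreInt32(&sweepers, n) // register all workers up front
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go worker(&wg)
	}
	wg.Wait()
}
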
// Increment locks to ensure that the goroutine is not preempted
// in the middle of sweep, which would leave the span in an
// inconsistent state for the next GC.
_g_.m.locks++
- if atomic.Load(&mheap_.sweepdone) != 0 {
+ if atomic.Load(&mheap_.sweepDrained) != 0 {
_g_.m.locks--
return ^uintptr(0)
}
for {
s := mheap_.nextSpanForSweep()
if s == nil {
- noMoreWork = atomic.Cas(&mheap_.sweepdone, 0, 1)
+ noMoreWork = atomic.Cas(&mheap_.sweepDrained, 0, 1)
break
}
if state := s.state.get(); state != mSpanInUse {
return npages
}
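
Using Cas rather than a plain store means noMoreWork is true for exactly one caller: the sweeper that flips sweepDrained from 0 to 1 takes responsibility for the end-of-sweep work. A small sketch demonstrating that exactly-once property (all names here are illustrative):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var drained uint32
	var winners int32
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// The CAS succeeds only for the goroutine that performs
			// the 0 -> 1 transition, so the "queue just became empty"
			// cleanup runs exactly once no matter how many callers race.
			if atomic.CompareAndSwapUint32(&drained, 0, 1) {
				atomic.AddInt32(&winners, 1)
			}
		}()
	}
	wg.Wait()
	fmt.Println("winners:", winners) // always 1
}
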
-// isSweepDone reports whether all spans are swept or currently being swept.
+// isSweepDone reports whether all spans are swept.
//
// Note that this condition may transition from false to true at any
// time as the sweeper runs. It may transition from true to false if a
// GC runs; to prevent that the caller must be non-preemptible or must
// somehow block GC progress.
func isSweepDone() bool {
- return mheap_.sweepdone != 0
+ // Check that all spans have at least begun sweeping and there
+ // are no active sweepers. If both are true, then all spans
+ // have finished sweeping.
+ return atomic.Load(&mheap_.sweepDrained) != 0 && atomic.Load(&mheap_.sweepers) == 0
}
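
The rewritten check distinguishes "drained" (every span has been handed to some sweeper) from "done" (every sweeper has also finished). A sketch of the same two-condition check outside the runtime, using sync/atomic in place of runtime/internal/atomic:

package main

import (
	"fmt"
	"sync/atomic"
)

// Stand-ins for mheap_.sweepDrained and mheap_.sweepers.
var (
	sweepDrained uint32
	sweepers     uint32
)

// Both conditions are required: a drained queue only means every span
// has *begun* sweeping; a sweeper may still be working on the span it
// popped, so the active-sweeper count must also be zero.
func isSweepDone() bool {
	return atomic.LoadUint32(&sweepDrained) != 0 &&
		atomic.LoadUint32(&sweepers) == 0
}

func main() {
	atomic.StoreUint32(&sweepDrained, 1) // queue is empty...
	atomic.StoreUint32(&sweepers, 1)     // ...but one span is mid-sweep
	fmt.Println(isSweepDone()) // false

	atomic.StoreUint32(&sweepers, 0) // last sweeper finishes its span
	fmt.Println(isSweepDone()) // true
}
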
// Returns only when span s has been swept.
type mheap struct {
// lock must only be acquired on the system stack, otherwise a g
// could self-deadlock if its stack grows with the lock held.
- lock mutex
- pages pageAlloc // page allocation data structure
- sweepgen uint32 // sweep generation, see comment in mspan; written during STW
- sweepdone uint32 // all spans are swept
- sweepers uint32 // number of active sweepone calls
+ lock mutex
+ pages pageAlloc // page allocation data structure
+
+ sweepgen uint32 // sweep generation, see comment in mspan; written during STW
+ sweepDrained uint32 // all spans are swept or are being swept
+ sweepers uint32 // number of active sweepone calls
// allspans is a slice of all mspans ever created. Each mspan
// appears exactly once.
systemstack(func() {
// To prevent excessive heap growth, before allocating n pages
// we need to sweep and reclaim at least n pages.
- if h.sweepdone == 0 {
+ if !isSweepDone() {
h.reclaim(npages)
}
s = h.allocSpan(npages, spanAllocHeap, spanclass)
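
The reclaim call enforces a pay-as-you-go rule: before the heap grows by npages, the allocator first sweeps back at least that many pages. A toy sketch of the proportionality idea; the arena type and its fields are invented for illustration, not the runtime's structures:

package main

import "fmt"

// arena is a toy heap: live pages in use plus garbage pages that a
// sweep could reclaim.
type arena struct {
	live    int
	garbage int
}

// reclaim sweeps back up to n garbage pages, mirroring the intent of
// mheap.reclaim: growth is paid for by an equal amount of sweeping.
func (a *arena) reclaim(n int) {
	if n > a.garbage {
		n = a.garbage
	}
	a.garbage -= n
	fmt.Printf("reclaimed %d pages\n", n)
}

// allocPages grows the heap by n pages, but only after reclaiming at
// least as many, so the total footprint stays bounded.
func (a *arena) allocPages(n int) {
	if a.garbage > 0 { // analogous to the !isSweepDone() gate above
		a.reclaim(n)
	}
	a.live += n
}

func main() {
	a := &arena{garbage: 16}
	a.allocPages(8)
	a.allocPages(4)
	fmt.Printf("live=%d garbage=%d\n", a.live, a.garbage)
}
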