spanBudget := 100
var s *mspan
- sl := newSweepLocker()
- sg := sl.sweepGen
+ var sl sweepLocker
// Try partial swept spans first.
+ sg := mheap_.sweepgen
if s = c.partialSwept(sg).pop(); s != nil {
goto havespan
}
- // Now try partial unswept spans.
- for ; spanBudget >= 0; spanBudget-- {
- s = c.partialUnswept(sg).pop()
- if s == nil {
- break
- }
- if s, ok := sl.tryAcquire(s); ok {
- // We got ownership of the span, so let's sweep it and use it.
- s.sweep(true)
- sl.dispose()
- goto havespan
- }
- // We failed to get ownership of the span, which means it's being or
- // has been swept by an asynchronous sweeper that just couldn't remove it
- // from the unswept list. That sweeper took ownership of the span and
- // responsibility for either freeing it to the heap or putting it on the
- // right swept list. Either way, we should just ignore it (and it's unsafe
- // for us to do anything else).
- }
- // Now try full unswept spans, sweeping them and putting them into the
- // right list if we fail to get a span.
- for ; spanBudget >= 0; spanBudget-- {
- s = c.fullUnswept(sg).pop()
- if s == nil {
- break
- }
- if s, ok := sl.tryAcquire(s); ok {
- // We got ownership of the span, so let's sweep it.
- s.sweep(true)
- // Check if there's any free space.
- freeIndex := s.nextFreeIndex()
- if freeIndex != s.nelems {
- s.freeindex = freeIndex
- sl.dispose()
+ sl = sweep.active.begin()
+ if sl.valid {
+ // Now try partial unswept spans.
+ for ; spanBudget >= 0; spanBudget-- {
+ s = c.partialUnswept(sg).pop()
+ if s == nil {
+ break
+ }
+ if s, ok := sl.tryAcquire(s); ok {
+ // We got ownership of the span, so let's sweep it and use it.
+ s.sweep(true)
+ sweep.active.end(sl)
goto havespan
}
- // Add it to the swept list, because sweeping didn't give us any free space.
- c.fullSwept(sg).push(s.mspan)
+ // We failed to get ownership of the span, which means it's being or
+ // has been swept by an asynchronous sweeper that just couldn't remove it
+ // from the unswept list. That sweeper took ownership of the span and
+ // responsibility for either freeing it to the heap or putting it on the
+ // right swept list. Either way, we should just ignore it (and it's unsafe
+ // for us to do anything else).
+ }
+ // Now try full unswept spans, sweeping them and putting them into the
+ // right list if we fail to get a span.
+ for ; spanBudget >= 0; spanBudget-- {
+ s = c.fullUnswept(sg).pop()
+ if s == nil {
+ break
+ }
+ if s, ok := sl.tryAcquire(s); ok {
+ // We got ownership of the span, so let's sweep it.
+ s.sweep(true)
+ // Check if there's any free space.
+ freeIndex := s.nextFreeIndex()
+ if freeIndex != s.nelems {
+ s.freeindex = freeIndex
+ sweep.active.end(sl)
+ goto havespan
+ }
+ // Add it to the swept list, because sweeping didn't give us any free space.
+ c.fullSwept(sg).push(s.mspan)
+ }
+ // See comment for partial unswept spans.
+ }
+ sweep.active.end(sl)
+ if trace.enabled {
+ traceGCSweepDone()
+ traceDone = true
}
- // See comment for partial unswept spans.
- }
- sl.dispose()
- if trace.enabled {
- traceGCSweepDone()
- traceDone = true
}
// We failed to get a span from the mcentral so get one from mheap.
nbgsweep uint32
npausesweep uint32
+ // active tracks outstanding sweepers and the sweep
+ // termination condition.
+ active activeSweep
+
// centralIndex is the current unswept span class.
// It represents an index into the mcentral span
// sets. Accessed and updated via its load and
return nil
}
+// sweepDrainedMask is the top bit of activeSweep.state. When set, all
+// sweep work has been drained from the sweep queues, though sweepers
+// may still be running on spans they already acquired.
+const sweepDrainedMask = 1 << 31
+
+// activeSweep is a type that captures whether sweeping
+// is done, and whether there are any outstanding sweepers.
+//
+// Every potential sweeper must call begin() before they look
+// for work, and end() after they've finished sweeping.
+type activeSweep struct {
+	// state is divided into two parts.
+	//
+	// The top bit (masked by sweepDrainedMask) is a boolean
+	// value indicating whether all the sweep work has been
+	// drained from the queue.
+	//
+	// The rest of the bits are a counter, indicating the
+	// number of outstanding concurrent sweepers.
+	state atomic.Uint32
+}
+
+// begin registers a new sweeper. Returns a sweepLocker
+// for acquiring spans for sweeping. Any outstanding sweeper blocks
+// sweep termination.
+//
+// If the sweepLocker is invalid, the caller can be sure that all
+// outstanding sweep work has been drained, so there is nothing left
+// to sweep. Note that there may be sweepers currently running, so
+// this does not indicate that all sweeping has completed.
+//
+// Even if the sweepLocker is invalid, its sweepGen is always valid.
+func (a *activeSweep) begin() sweepLocker {
+	for {
+		state := a.state.Load()
+		// All sweep work has been drained: do not register as a
+		// sweeper, and hand back an invalid locker.
+		if state&sweepDrainedMask != 0 {
+			return sweepLocker{mheap_.sweepgen, false}
+		}
+		// Bump the outstanding-sweeper count. On CAS failure some
+		// other sweeper raced with us; reload state and retry.
+		if a.state.CompareAndSwap(state, state+1) {
+			return sweepLocker{mheap_.sweepgen, true}
+		}
+	}
+}
+
+// end deregisters a sweeper. Must be called once for each time
+// begin is called if the sweepLocker is valid.
+func (a *activeSweep) end(sl sweepLocker) {
+	if sl.sweepGen != mheap_.sweepgen {
+		throw("sweeper left outstanding across sweep generations")
+	}
+	for {
+		state := a.state.Load()
+		// If decrementing the counter bits would underflow (the
+		// counter part of state is already zero), end was called
+		// without a matching begin.
+		if (state&^sweepDrainedMask)-1 >= sweepDrainedMask {
+			throw("mismatched begin/end of activeSweep")
+		}
+		if a.state.CompareAndSwap(state, state-1) {
+			// state == sweepDrainedMask means the drained bit was
+			// set and we were the last outstanding sweeper, i.e.
+			// this decrement completed the sweep cycle.
+			if state != sweepDrainedMask {
+				return
+			}
+			if debug.gcpacertrace > 0 {
+				print("pacer: sweep done at heap size ", gcController.heapLive>>20, "MB; allocated ", (gcController.heapLive-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
+			}
+			return
+		}
+	}
+}
+
+// markDrained marks the active sweep cycle as having drained
+// all remaining work. This is safe to be called concurrently
+// with all other methods of activeSweep, though may race.
+//
+// Returns true if this call was the one that actually performed
+// the mark.
+func (a *activeSweep) markDrained() bool {
+	for {
+		state := a.state.Load()
+		// Another caller already set the drained bit.
+		if state&sweepDrainedMask != 0 {
+			return false
+		}
+		if a.state.CompareAndSwap(state, state|sweepDrainedMask) {
+			return true
+		}
+	}
+}
+
+// sweepers returns the current number of active sweepers.
+func (a *activeSweep) sweepers() uint32 {
+	// Strip the drained bit; the remaining low bits are the
+	// outstanding-sweeper counter.
+	return a.state.Load() &^ sweepDrainedMask
+}
+
+// isDone returns true if all sweep work has been drained and no more
+// outstanding sweepers exist. That is, when the sweep phase is
+// completely done.
+func (a *activeSweep) isDone() bool {
+	// Exactly the drained bit set: work drained and a sweeper
+	// count of zero.
+	return a.state.Load() == sweepDrainedMask
+}
+
+// reset sets up the activeSweep for the next sweep cycle.
+//
+// The world must be stopped.
+func (a *activeSweep) reset() {
+	assertWorldStopped()
+	// Clear both the drained bit and the sweeper count.
+	a.state.Store(0)
+}
+
// finishsweep_m ensures that all spans are swept.
//
// The world must be stopped. This ensures there are no sweeps in
sweep.npausesweep++
}
+ // Make sure there aren't any outstanding sweepers left.
+ // At this point, with the world stopped, it means one of two
+ // things. Either we were able to preempt a sweeper, or that
+ // a sweeper didn't call sweep.active.end when it should have.
+ // Both cases indicate a bug, so throw.
+ if sweep.active.sweepers() != 0 {
+ throw("active sweepers found at start of mark phase")
+ }
+
// Reset all the unswept buffers, which should be empty.
// Do this in sweep termination as opposed to mark termination
// so that we can catch unswept spans and reclaim blocks as
}
}
-// sweepLocker acquires sweep ownership of spans and blocks sweep
-// completion.
+// sweepLocker acquires sweep ownership of spans.
type sweepLocker struct {
	// sweepGen is the sweep generation of the heap.
	sweepGen uint32
-	// blocking indicates that this tracker is blocking sweep
-	// completion, usually as a result of acquiring sweep
-	// ownership of at least one span.
-	blocking bool
+	// valid indicates whether this sweepLocker successfully
+	// registered with sweep.active via begin; tryAcquire throws
+	// if called while valid is false.
+	valid bool
}
// sweepLocked represents sweep ownership of a span.
*mspan
}
-func newSweepLocker() sweepLocker {
- return sweepLocker{
- sweepGen: mheap_.sweepgen,
- }
-}
-
// tryAcquire attempts to acquire sweep ownership of span s. If it
// successfully acquires ownership, it blocks sweep completion.
func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
+	if !l.valid {
+		throw("use of invalid sweepLocker")
+	}
	// Check before attempting to CAS.
	if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
		return sweepLocked{}, false
	}
-	// Add ourselves to sweepers before potentially taking
-	// ownership.
-	l.blockCompletion()
	// Attempt to acquire sweep ownership of s.
	// A CAS failure means another sweeper claimed s between the
	// check above and here.
	if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
		return sweepLocked{}, false
	return sweepLocked{s}, true
}
-// blockCompletion blocks sweep completion without acquiring any
-// specific spans.
-func (l *sweepLocker) blockCompletion() {
- if !l.blocking {
- atomic.Xadd(&mheap_.sweepers, +1)
- l.blocking = true
- }
-}
-
-func (l *sweepLocker) dispose() {
- if !l.blocking {
- return
- }
- // Decrement the number of active sweepers and if this is the
- // last one, mark sweep as complete.
- l.blocking = false
- if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepDrained) != 0 {
- l.sweepIsDone()
- }
-}
-
-func (l *sweepLocker) sweepIsDone() {
- if debug.gcpacertrace > 0 {
- print("pacer: sweep done at heap size ", gcController.heapLive>>20, "MB; allocated ", (gcController.heapLive-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
- }
-}
-
// sweepone sweeps some unswept heap span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr {
-	_g_ := getg()
+	gp := getg()
-	// increment locks to ensure that the goroutine is not preempted
+	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep thus leaving the span in an inconsistent state for next GC
-	_g_.m.locks++
-	if atomic.Load(&mheap_.sweepDrained) != 0 {
-		_g_.m.locks--
-		return ^uintptr(0)
-	}
+	gp.m.locks++
+
	// TODO(austin): sweepone is almost always called in a loop;
	// lift the sweepLocker into its callers.
-	sl := newSweepLocker()
+	sl := sweep.active.begin()
+	// An invalid locker means all sweep work has already been
+	// drained; there is nothing for us to do.
+	if !sl.valid {
+		gp.m.locks--
+		return ^uintptr(0)
+	}
	// Find a span to sweep.
	npages := ^uintptr(0)
	for {
		s := mheap_.nextSpanForSweep()
		if s == nil {
-			noMoreWork = atomic.Cas(&mheap_.sweepDrained, 0, 1)
+			noMoreWork = sweep.active.markDrained()
			break
		}
		if state := s.state.get(); state != mSpanInUse {
			break
		}
	}
-
-	sl.dispose()
+	sweep.active.end(sl)
	if noMoreWork {
		// The sweep list is empty. There may still be
		readyForScavenger()
	}
-	_g_.m.locks--
+	gp.m.locks--
	return npages
}
// GC runs; to prevent that the caller must be non-preemptible or must
// somehow block GC progress.
func isSweepDone() bool {
-	// Check that all spans have at least begun sweeping and there
-	// are no active sweepers. If both are true, then all spans
-	// have finished sweeping.
-	return atomic.Load(&mheap_.sweepDrained) != 0 && atomic.Load(&mheap_.sweepers) == 0
+	// The same condition as before (work drained and no active
+	// sweepers), now centralized in activeSweep.isDone.
+	return sweep.active.isDone()
}
// Returns only when span s has been swept.
throw("mspan.ensureSwept: m is not locked")
}
- sl := newSweepLocker()
- // The caller must be sure that the span is a mSpanInUse span.
- if s, ok := sl.tryAcquire(s); ok {
- s.sweep(false)
- sl.dispose()
- return
+ // If this operation fails, then that means that there are
+ // no more spans to be swept. In this case, either s has already
+ // been swept, or is about to be acquired for sweeping and swept.
+ sl := sweep.active.begin()
+ if sl.valid {
+ // The caller must be sure that the span is a mSpanInUse span.
+ if s, ok := sl.tryAcquire(s); ok {
+ s.sweep(false)
+ sweep.active.end(sl)
+ return
+ }
+ sweep.active.end(sl)
}
- sl.dispose()
- // unfortunate condition, and we don't have efficient means to wait
+ // Unfortunately we can't sweep the span ourselves. Somebody else
+ // got to it first. We don't have efficient means to wait, but that's
+ // OK, it will be swept fairly soon.
for {
spangen := atomic.Load(&s.sweepgen)
if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {