gcSetTriggerRatio(memstats.triggerRatio)
unlock(&mheap_.lock)
})
- // Pacing changed, so the scavenger should be awoken.
- wakeScavenger()
// If we just disabled GC, wait for any concurrent GC mark to
// finish so we always return with no GC running.
// Update GC trigger and pacing for the next cycle.
gcSetTriggerRatio(nextTriggerRatio)
- // Pacing changed, so the scavenger should be awoken.
- wakeScavenger()
-
// Update timing memstats
now := nanotime()
sec, nsec, _ := time_now()
return
}
mheap_.scavengeGoal = retainedGoal
- mheap_.pages.resetScavengeAddr()
}
// Sleep/wait state of the background scavenger.
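//
// g is the scavenger goroutine, parked reports whether it is currently
// parked, timer wakes it from its timed sleeps, and sysmonWake (set
// atomically) asks sysmon to wake it via wakeScavenger.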
var scavenge struct {
- lock mutex
- g *g
- parked bool
- timer *timer
+ lock mutex
+ g *g
+ parked bool
+ timer *timer
+ sysmonWake uint32 // Set atomically.
}
-// wakeScavenger unparks the scavenger if necessary. It must be called
-// after any pacing update.
+// readyForScavenger signals sysmon to wake the scavenger because
+// there may be new work to do.
//
-// mheap_.lock and scavenge.lock must not be held.
+// There may be a significant delay between when this function runs
+// and when the scavenger is kicked awake, but it may be safely invoked
+// in contexts where wakeScavenger is unsafe to call directly.
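+//
+// Callers that may themselves be on an allocation path, such as the last
+// sweeper to finish (see below), use this function; sysmon later observes
+// the flag and calls wakeScavenger on their behalf.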
+func readyForScavenger() {
+ atomic.Store(&scavenge.sysmonWake, 1)
+}
+
+// wakeScavenger immediately unparks the scavenger if necessary.
+//
+// May run without a P, but it may allocate, so it must not be called
+// on any allocation path.
+//
+// mheap_.lock, scavenge.lock, and sched.lock must not be held.
func wakeScavenger() {
lock(&scavenge.lock)
if scavenge.parked {
+ // Notify sysmon that it shouldn't bother waking up the scavenger.
+ atomic.Store(&scavenge.sysmonWake, 0)
+
// Try to stop the timer but we don't really care if we succeed.
// It's possible that either a timer was never started, or that
// we're racing with it.
stopTimer(scavenge.timer)

// Unpark the goroutine and tell it that there may have been a pacing
// change. In effect, skipping the runnext slot (see below) schedules the
// scavenger at a "lower priority" but that's OK because it'll
// catch up on the work it missed when it does get scheduled.
scavenge.parked = false
- systemstack(func() {
- ready(scavenge.g, 0, false)
- })
+
+ // Ready the goroutine by injecting it. We use injectglist instead
+ // of ready or goready in order to allow us to run this function
+ // without a P. injectglist also avoids placing the goroutine in
+ // the current P's runnext slot, which is desirable to prevent
+ // the scavenger from interfering with user goroutine scheduling
+ // too much.
+ var list gList
+ list.push(scavenge.g)
+ injectglist(&list)
}
unlock(&scavenge.lock)
}
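
// For orientation, a rough sketch (an approximation, not lines quoted from
// this change) of how the scavenger goroutine parks itself, which is the
// state that the scavenge.parked check and injectglist above undo:
//
//    lock(&scavenge.lock)
//    scavenge.parked = true
//    goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)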
}
// resetScavengeAddr sets the scavenge start address to the top of the heap's
-// address space. This should be called each time the scavenger's pacing
-// changes.
+// address space. This should be called whenever the sweeper is done.
//
// s.mheapLock must be held.
func (s *pageAlloc) resetScavengeAddr() {
}
}
+ // Sweeping is done, so if the scavenger isn't already awake,
+ // wake it up. There's definitely work for it to do at this
+ // point.
+ wakeScavenger()
+
nextMarkBitArenaEpoch()
}
// Decrement the number of active sweepers and if this is the
// last one print trace information.
if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
+ // Since the sweeper is done, reset the scavenger's pointer
+ // into the heap and wake it if necessary.
+ //
+ // The scavenger is signaled by the last sweeper because once
+ // sweeping is done, we will definitely have useful work for
+ // the scavenger to do, since the scavenger only runs over the
+ // heap once per GC cycle. This update is not done during sweep
+ // termination because in some cases there may be a long delay
+ // between sweep done and sweep termination (e.g. not enough
+ // allocations to trigger a GC) which would be nice to fill in
+ // with scavenging work.
+ systemstack(func() {
+ lock(&mheap_.lock)
+ mheap_.pages.resetScavengeAddr()
+ unlock(&mheap_.lock)
+ })
+ // Since we might sweep in an allocation path, it's not possible
+ // for us to wake the scavenger directly via wakeScavenger, since
+ // it could allocate. Ask sysmon to do it for us instead.
+ readyForScavenger()
+
if debug.gcpacertrace > 0 {
print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
}
// Try to start an M to run them.
startm(nil, false)
}
+ if atomic.Load(&scavenge.sysmonWake) != 0 {
+ // Kick the scavenger awake if someone requested it.
+ wakeScavenger()
+ }
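+
+ // Note: sysmon's polling interval ranges from roughly 20µs up to 10ms
+ // between iterations (longer when the runtime is completely idle), which
+ // is the "significant delay" that readyForScavenger's doc comment allows for.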
// retake P's blocked in syscalls
// and preempt long running G's
if retake(now) != 0 {