func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
	pp := (*pageAlloc)(p)
	systemstack(func() {
-		r = pp.scavenge(nbytes)
+		r = pp.scavenge(nbytes, nil)
	})
	return
}
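
// Note, not part of the diff: the new second argument is an optional stop
// predicate. Passing nil, as this test shim does, keeps the old behavior of
// scavenging unconditionally until nbytes worth of memory has been released.
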
	// want to claim was done by this call.
	workFlushed := -gcw.heapScanWork

+	// In addition to backing out because of a preemption, back out
+	// if the GC CPU limiter is enabled.
	gp := getg().m.curg
-	for !gp.preempt && workFlushed+gcw.heapScanWork < scanWork {
+	for !gp.preempt && !gcCPULimiter.limiting() && workFlushed+gcw.heapScanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
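
// Note, not part of the diff: the drain loop now backs out under any of three
// conditions: the goroutine was preempted, the GC CPU limiter has engaged, or
// enough scan work has been flushed. gcCPULimiter.limiting() is assumed here
// to be a cheap atomic flag check, so polling it once per iteration adds no
// meaningful overhead.
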
	if s.scavenge == nil {
		s.scavenge = func(n uintptr) (uintptr, int64) {
			start := nanotime()
-			r := mheap_.pages.scavenge(n)
+			r := mheap_.pages.scavenge(n, nil)
			end := nanotime()
			if start >= end {
				return r, 0
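
// Note, not part of the diff: the start >= end guard above returns a zero
// duration when the clock reads do not advance between samples (e.g. coarse
// timer resolution), so the scavenger hook never reports a zero-or-negative
// interval as time worked.
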
//
// scavenge always tries to scavenge nbytes worth of memory, and will
// only fail to do so if the heap is exhausted for now.
-func (p *pageAlloc) scavenge(nbytes uintptr) uintptr {
+func (p *pageAlloc) scavenge(nbytes uintptr, shouldStop func() bool) uintptr {
	released := uintptr(0)
	for released < nbytes {
		ci, pageIdx := p.scav.index.find()
		if ci == 0 {
			break
		}
		systemstack(func() {
			released += p.scavengeOne(ci, pageIdx, nbytes-released)
		})
+		if shouldStop != nil && shouldStop() {
+			break
+		}
	}
	return released
}
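
// Illustrative usage, not part of the diff: callers now pick between an
// uninterruptible scavenge and one bounded by a predicate. The two patterns
// this change introduces look like:
//
//	// Scavenge to completion (tests, explicit "free everything" paths):
//	released := h.pages.scavenge(nbytes, nil)
//
//	// Back off as soon as the GC CPU limiter engages (allocation path):
//	released = h.pages.scavenge(nbytes, func() bool {
//		return gcCPULimiter.limiting()
//	})
//
// Since shouldStop is only polled between scavengeOne calls, the chunk being
// scavenged when the predicate flips still completes before the loop exits.
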
		// Measure how long we spent scavenging and add that measurement to the assist
		// time so we can track it for the GC CPU limiter.
		start := nanotime()
-		h.pages.scavenge(bytesToScavenge)
+		h.pages.scavenge(bytesToScavenge, func() bool {
+			return gcCPULimiter.limiting()
+		})
		now := nanotime()
		h.pages.scav.assistTime.Add(now - start)
		gcCPULimiter.addAssistTime(now - start)
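
// Note, not part of the diff: the same measured interval feeds two consumers,
// the scavenger's assist-time statistic (h.pages.scav.assistTime) and the GC
// CPU limiter (gcCPULimiter.addAssistTime), so scavenging work done on the
// allocation path is charged against the limiter's budget.
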
	gp := getg()
	gp.m.mallocing++

-	released := h.pages.scavenge(^uintptr(0))
+	released := h.pages.scavenge(^uintptr(0), nil)

	gp.m.mallocing--
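
// Note, not part of the diff: ^uintptr(0) requests the maximum possible
// amount, i.e. "scavenge everything". This caller passes a nil shouldStop,
// presumably because an explicit request to release all memory (as in
// runtime/debug.FreeOSMemory) should never be cut short by the limiter.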