From e30c6d64bac1b5a31a7062dff89744332bebc23e Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Mon, 3 Aug 2015 09:25:23 -0400
Subject: [PATCH] runtime: always give concurrent sweep some heap distance

Currently it's possible for the next_gc heap size trigger computed for
the next GC cycle to be less than the current allocated heap size.
This means the next cycle will start immediately, which means there's
no time to perform the concurrent sweep between GC cycles. This places
responsibility for finishing the sweep on GC itself, which delays GC
start-up and hence delays mutator assist.

Fix this by ensuring that next_gc is always at least a little higher
than the allocated heap size, so we won't trigger the next cycle
instantly.

Updates #11911.

Change-Id: I74f0b887bf187518d5fedffc7989817cbcf30592
Reviewed-on: https://go-review.googlesource.com/13043
Reviewed-by: Rick Hudson
Reviewed-by: Russ Cox
---
 src/runtime/mgc.go | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index de9f4f51fd..f308530c5c 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -145,6 +145,11 @@ const (
 	// bookkeeping will use a large amount of each stack.
 	firstStackBarrierOffset = 1024
 	debugStackBarrier       = false
+
+	// sweepMinHeapDistance is a lower bound on the heap distance
+	// (in bytes) reserved for concurrent sweeping between GC
+	// cycles. This will be scaled by gcpercent/100.
+	sweepMinHeapDistance = 1024 * 1024
 )
 
 // heapminimum is the minimum heap size at which to trigger GC.
@@ -1489,6 +1494,21 @@ func gcMark(start_time int64) {
 	memstats.heap_marked = work.bytesMarked
 	memstats.heap_scan = uint64(gcController.scanWork)
 
+	minNextGC := memstats.heap_live + sweepMinHeapDistance*uint64(gcpercent)/100
+	if memstats.next_gc < minNextGC {
+		// The allocated heap is already past the trigger.
+		// This can happen if the triggerRatio is very low and
+		// the reachable heap estimate is less than the live
+		// heap size.
+		//
+		// Concurrent sweep happens in the heap growth from
+		// heap_live to next_gc, so bump next_gc up to ensure
+		// that concurrent sweep has some heap growth in which
+		// to perform sweeping before we start the next GC
+		// cycle.
+		memstats.next_gc = minNextGC
+	}
+
 	if trace.enabled {
 		traceHeapAlloc()
 		traceNextGC()
-- 
2.50.0
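
Note (not part of the patch): the sketch below restates the clamp that the
gcMark hunk applies to the trigger, using ordinary locals in place of the
runtime's memstats.heap_live, memstats.next_gc, and gcpercent. clampNextGC,
its parameters, and the example values (GOGC=100, a 4 MiB live heap) are
hypothetical stand-ins chosen for illustration only.

    // sweepMinHeapDistance mirrors the constant added by the patch.
    package main

    import "fmt"

    const sweepMinHeapDistance = 1024 * 1024 // bytes, scaled by gcpercent/100

    // clampNextGC is a hypothetical helper: it raises nextGC, if needed, so
    // that at least sweepMinHeapDistance*gcpercent/100 bytes of heap growth
    // remain for the concurrent sweeper before the next cycle triggers.
    func clampNextGC(heapLive, nextGC uint64, gcpercent int) uint64 {
    	minNextGC := heapLive + sweepMinHeapDistance*uint64(gcpercent)/100
    	if nextGC < minNextGC {
    		nextGC = minNextGC
    	}
    	return nextGC
    }

    func main() {
    	// With GOGC=100 the reserved distance is 1 MiB: a trigger computed
    	// at or below a 4 MiB live heap is bumped to 5 MiB, so the next
    	// cycle does not start immediately.
    	fmt.Println(clampNextGC(4<<20, 4<<20, 100)) // prints 5242880
    }

Under these assumed numbers the trigger moves from 4 MiB to 5 MiB, giving the
sweeper 1 MiB of allocation headroom between cycles, which is the behavior the
commit message describes.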