// Concurrent sweep needs to sweep all of the in-use
// pages by the time the allocated heap reaches the GC
// trigger. Compute the ratio of in-use pages to sweep
// per byte allocated.
- heapDistance := int64(trigger) - int64(atomic.Load64(&memstats.heap_live))
+ heapLiveBasis := atomic.Load64(&memstats.heap_live)
+ heapDistance := int64(trigger) - int64(heapLiveBasis)
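+ // heapDistance is how much the heap can still grow before GC
+ // triggers; all in-use pages must be swept within that growth.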
// Add a little margin so rounding errors and
// concurrent sweep are less likely to leave pages
// unswept when GC starts.
heapDistance -= 1024 * 1024
if heapDistance < _PageSize {
	// Avoid setting the sweep ratio extremely high
	heapDistance = _PageSize
}
mheap_.sweepPagesPerByte = float64(mheap_.pagesInUse) / float64(heapDistance)
mheap_.pagesSwept = 0
- mheap_.spanBytesAlloc = 0
+ mheap_.sweepHeapLiveBasis = heapLiveBasis
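+ // For example (illustrative numbers): with pagesInUse = 1000
+ // pages and heapDistance = 10MB, sweepPagesPerByte is ~1e-4, so
+ // each megabyte of heap growth past heapLiveBasis obliges
+ // roughly 100 pages of sweeping.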
}
}
// Decrement the number of active sweepers and if this is the
// last one print trace information.
if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
if debug.gcpacertrace > 0 {
- print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", mheap_.spanBytesAlloc>>20, "MB of spans; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
+ print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
}
}
_g_.m.locks--
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
-// allocation. The caller should call reimburseSweepCredit if that
-// turns out not to be the case once the span is allocated.
+// allocation.
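+//
+// Over-charging here is harmless: the sweep debt is recomputed
+// from heap_live on every call rather than accumulated, so no
+// reimbursement is needed if some of the span goes unused.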
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent GC
// cycle.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	if trace.enabled {
		traceGCSweepStart()
}
- // Account for this span allocation.
- spanBytesAlloc := atomic.Xadd64(&mheap_.spanBytesAlloc, int64(spanBytes))
-
// Fix debt if necessary.
- pagesOwed := int64(mheap_.sweepPagesPerByte * float64(spanBytesAlloc))
- for pagesOwed-int64(atomic.Load64(&mheap_.pagesSwept)) > int64(callerSweepPages) {
+ newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
+ pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
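+ // pagesTarget is the worst-case sweep debt: the pages that must
+ // be swept by the time the heap grows to newHeapLive bytes past
+ // the sweep basis, less the callerSweepPages the caller itself
+ // will sweep.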
+ for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)) {
if gosweepone() == ^uintptr(0) {
mheap_.sweepPagesPerByte = 0
break
traceGCSweepDone()
}
}
-
-// reimburseSweepCredit records that unusableBytes bytes of a
-// just-allocated span are not available for object allocation. This
-// offsets the worst-case charge performed by deductSweepCredit.
-func reimburseSweepCredit(unusableBytes uintptr) {
- if mheap_.sweepPagesPerByte == 0 {
- // Nobody cares about the credit. Avoid the atomic.
- return
- }
- nval := atomic.Xadd64(&mheap_.spanBytesAlloc, -int64(unusableBytes))
- if int64(nval) < 0 {
- // Debugging for #18043.
- print("runtime: bad spanBytesAlloc=", nval, " (was ", nval+uint64(unusableBytes), ") unusableBytes=", unusableBytes, " sweepPagesPerByte=", mheap_.sweepPagesPerByte, "\n")
- throw("spanBytesAlloc underflow")
- }
-}
_ uint32 // align uint64 fields on 32-bit for atomics
// Proportional sweep
- pagesInUse        uint64  // pages of spans in stats _MSpanInUse; R/W with mheap.lock
- spanBytesAlloc    uint64  // bytes of spans allocated this cycle; updated atomically
- pagesSwept        uint64  // pages swept this cycle; updated atomically
- sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
+ pagesInUse         uint64  // pages of spans in stats _MSpanInUse; R/W with mheap.lock
+ pagesSwept         uint64  // pages swept this cycle; updated atomically
+ sweepHeapLiveBasis uint64  // value of heap_live to use as the origin of sweep ratio; written with lock, read without
+ sweepPagesPerByte  float64 // proportional sweep ratio; written with lock, read without
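+ // Together, sweepHeapLiveBasis and sweepPagesPerByte define the
+ // sweep pacing line: by the time heap_live reaches
+ // sweepHeapLiveBasis+x, about sweepPagesPerByte*x pages should
+ // have been swept.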
// TODO(austin): pagesInUse should be a uintptr, but the 386
// compiler can't 8-byte align fields.