}
}
}
- c.local_cachealloc += size
} else {
var s *mspan
shouldhelpgc = true
type mcache struct {
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
- next_sample int32 // trigger heap sample after allocating this many bytes
- local_cachealloc uintptr // bytes allocated from cache since last lock of heap
- local_scan uintptr // bytes of scannable heap allocated
+ next_sample int32 // trigger heap sample after allocating this many bytes
+ local_scan uintptr // bytes of scannable heap allocated
// Allocator cache for tiny objects w/o pointers.
// See "Tiny allocator" comment in malloc.go.
if usedBytes > 0 {
reimburseSweepCredit(usedBytes)
}
+ atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
+ if trace.enabled {
+ // heap_live changed.
+ traceHeapAlloc()
+ }
+ if gcBlackenEnabled != 0 {
+ // heap_live changed.
+ gcController.revise()
+ }
if s.freelist.ptr() == nil {
throw("freelist empty")
}
if n > 0 {
c.empty.remove(s)
c.nonempty.insert(s)
+ // mCentral_CacheSpan conservatively counted
+ // unallocated slots in heap_live. Undo this.
+ atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
}
unlock(&c.lock)
}
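
The two mcentral hunks above form a pair: mCentral_CacheSpan charges all of a span's still-unallocated slots to heap_live the moment an mcache takes the span, and mCentral_UncacheSpan refunds whatever was charged but never handed out. Below is a minimal standalone sketch of that charge-then-refund accounting, using simplified stand-in types and names rather than the runtime's own:

package main

import (
	"fmt"
	"sync/atomic"
)

var heapLive uint64 // stand-in for memstats.heap_live

type span struct {
	nelems   int64 // total object slots in the span
	ref      int64 // slots already allocated from the span
	elemsize int64 // bytes per slot
}

// cacheSpan charges every still-free slot to heapLive as if it were
// already allocated, so per-object allocations from the local cache
// need no further updates to the shared counter.
func cacheSpan(s *span) {
	free := s.nelems - s.ref
	atomic.AddUint64(&heapLive, uint64(free*s.elemsize))
}

// uncacheSpan refunds the slots that cacheSpan charged but that were
// never actually allocated, tightening the overestimate.
func uncacheSpan(s *span) {
	if free := s.nelems - s.ref; free > 0 {
		atomic.AddUint64(&heapLive, ^uint64(free*s.elemsize-1)) // atomic subtract
	}
}

func main() {
	s := &span{nelems: 8, ref: 2, elemsize: 32}
	cacheSpan(s)          // charge the 6 free slots: 192 bytes
	s.ref = 5             // 3 objects actually allocated from the cache
	uncacheSpan(s)        // refund the 3 unused slots: 96 bytes
	fmt.Println(heapLive) // 96, exactly the bytes really handed out
}
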
// is approximately the amount of heap that was allocated
// since marking began).
allocatedDuringCycle := memstats.heap_live - work.initialHeapLive
+ if memstats.heap_live < work.initialHeapLive {
+ // This can happen if mCentral_UncacheSpan tightens
+ // the heap_live approximation.
+ allocatedDuringCycle = 0
+ }
if work.bytesMarked >= allocatedDuringCycle {
memstats.heap_reachable = work.bytesMarked - allocatedDuringCycle
} else {
throw("next_gc underflow")
}
- // Update other GC heap size stats.
+ // Update other GC heap size stats. This must happen after
+ // cachestats (which flushes local statistics to these) and
+ // flushallmcaches (which modifies heap_live).
memstats.heap_live = work.bytesMarked
memstats.heap_marked = work.bytesMarked
memstats.heap_scan = uint64(gcController.scanWork)
}
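
For the clamp added above, a hypothetical worked example may help: because heap_live is a deliberate overestimate and mCentral_UncacheSpan can later refund bytes that were charged before marking began, heap_live can momentarily read below work.initialHeapLive, and the unsigned subtraction would wrap. The numbers below are invented; the helper only mirrors the arithmetic of this hunk:

package main

import "fmt"

// reachable mirrors the computation above: bytesMarked plays the role
// of work.bytesMarked, heapLive of memstats.heap_live at mark
// termination, and initialHeapLive of work.initialHeapLive.
func reachable(bytesMarked, heapLive, initialHeapLive uint64) uint64 {
	allocatedDuringCycle := heapLive - initialHeapLive
	if heapLive < initialHeapLive {
		// The unsigned subtraction above wrapped around; treat
		// it as "nothing allocated during the cycle".
		allocatedDuringCycle = 0
	}
	if bytesMarked < allocatedDuringCycle {
		panic("next_gc underflow")
	}
	return bytesMarked - allocatedDuringCycle
}

func main() {
	// Ordinary case: 4 MB marked, heap grew from 9 MB to 10 MB
	// while marking ran, so ~3 MB was reachable at the start.
	fmt.Println(reachable(4<<20, 10<<20, 9<<20)) // 3145728

	// heap_live was tightened 4 KB below its value at the start of
	// the cycle. Without the clamp the subtraction would wrap to a
	// huge number instead of being treated as zero.
	fmt.Println(reachable(4<<20, 9<<20-4096, 9<<20)) // 4194304
}
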
// transfer stats from cache to global
- memstats.heap_live += uint64(_g_.m.mcache.local_cachealloc)
- _g_.m.mcache.local_cachealloc = 0
memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
_g_.m.mcache.local_scan = 0
memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
h.pagesInUse += uint64(npage)
if large {
memstats.heap_objects++
- memstats.heap_live += uint64(npage << _PageShift)
+ atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
// Swept spans are at the end of lists.
if s.npages < uintptr(len(h.free)) {
h.busy[s.npages].insertBack(s)
systemstack(func() {
mp := getg().m
lock(&h.lock)
- memstats.heap_live += uint64(mp.mcache.local_cachealloc)
- mp.mcache.local_cachealloc = 0
memstats.heap_scan += uint64(mp.mcache.local_scan)
mp.mcache.local_scan = 0
memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
memstats.heap_objects--
}
if gcBlackenEnabled != 0 {
+ // heap_scan changed.
gcController.revise()
}
h.freeSpanLocked(s, true, true, 0)
- if trace.enabled {
- traceHeapAlloc()
- }
unlock(&h.lock)
})
}
// Statistics about garbage collector.
// Protected by mheap or stopping the world during GC.
- next_gc uint64 // next gc (in heap_alloc time)
+ next_gc uint64 // next gc (in heap_live time)
last_gc uint64 // last gc (in absolute time)
pause_total_ns uint64
pause_ns [256]uint64 // circular buffer of recent gc pause lengths
// heap_live is the number of bytes considered live by the GC.
// That is: retained by the most recent GC plus allocated
- // since then. heap_live <= heap_alloc, since heap_live
- // excludes unmarked objects that have not yet been swept.
+ // since then. heap_live <= heap_alloc, since heap_alloc
+ // includes unmarked objects that have not yet been swept (and
+ // hence goes up as we allocate and down as we sweep) while
+ // heap_live excludes these objects (and hence only goes up
+ // between GCs).
+ //
+ // This is updated atomically without locking. To reduce
+ // contention, this is updated only when obtaining a span from
+ // an mcentral and at this point it counts all of the
+ // unallocated slots in that span (which will be allocated
+ // before that mcache obtains another span from that
+ // mcentral). Hence, it slightly overestimates the "true" live
+ // heap size. It's better to overestimate than to
+ // underestimate because 1) this triggers the GC earlier than
+ // necessary rather than potentially too late and 2) this
+ // leads to a conservative GC rate rather than a GC rate that
+ // is potentially too low.
+ //
+ // Whenever this is updated, call traceHeapAlloc() and
+ // gcController.revise().
heap_live uint64
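
To make the contention argument in the comment above concrete, here is a self-contained sketch (invented names and sizes, not runtime code) of updating a shared counter once per span rather than once per object; the unused tail of the last span is exactly the overestimate the comment describes:

package main

import (
	"fmt"
	"sync/atomic"
)

var heapLive uint64 // stand-in for memstats.heap_live

const (
	objSize     = 64  // bytes per object (assumed)
	objsPerSpan = 128 // objects carved out of one span (assumed)
)

// allocAll models the batching described above: one atomic add to the
// shared counter per span, covering every slot the cache will hand
// out, then purely local bookkeeping for each individual allocation.
func allocAll(nobjs int) {
	local := 0 // free slots left in the span held by "this cache"
	for i := 0; i < nobjs; i++ {
		if local == 0 {
			// "Obtain a span from the mcentral": charge all of
			// its slots up front, overestimating the live heap
			// by whatever tail goes unused.
			atomic.AddUint64(&heapLive, objsPerSpan*objSize)
			local = objsPerSpan
		}
		local-- // the per-object path touches no shared state
	}
}

func main() {
	allocAll(1000)
	// 1000 objects consume ceil(1000/128) = 8 spans, so heapLive is
	// 8*128*64 = 65536 bytes versus 64000 bytes truly allocated: a
	// slight overestimate, erring toward triggering GC earlier.
	fmt.Println(heapLive)
}
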
// heap_scan is the number of bytes of "scannable" heap. This
// is the live heap (as counted by heap_live), but omitting
// no-scan objects and no-scan tails of objects.
+ //
+ // Whenever this is updated, call gcController.revise().
heap_scan uint64
// heap_marked is the number of bytes marked by the previous
func purgecachedstats(c *mcache) {
// Protected by either heap or GC lock.
h := &mheap_
- memstats.heap_live += uint64(c.local_cachealloc)
- c.local_cachealloc = 0
- if trace.enabled {
- traceHeapAlloc()
- }
memstats.heap_scan += uint64(c.local_scan)
c.local_scan = 0
memstats.tinyallocs += uint64(c.local_tinyallocs)