// gcworkbuffree(c.gcworkbuf)
lock(&mheap_.lock)
- purgecachedstats(c)
// Donate anything else that's left.
c.donate(recipient)
mheap_.cachealloc.free(unsafe.Pointer(c))
// donate flushes data and resources which have no global
// pool to another mcache.
func (c *mcache) donate(d *mcache) {
+ // local_scan is handled separately because it's not
+ // like these stats -- it's used for GC pacing.
d.local_largealloc += c.local_largealloc
c.local_largealloc = 0
d.local_nlargealloc += c.local_nlargealloc
// Assume all objects from this span will be allocated in the
// mcache. If it gets uncached, we'll adjust this.
c.local_nsmallalloc[spc.sizeclass()] += uintptr(s.nelems) - uintptr(s.allocCount)
+
+ // Update heap_live with the same assumption.
usedBytes := uintptr(s.allocCount) * s.elemsize
atomic.Xadd64(&memstats.heap_live, int64(s.npages*pageSize)-int64(usedBytes))
+
+ // While we're here, flush local_scan, since we have to call
+ // revise anyway.
+ atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
+ c.local_scan = 0
+
if trace.enabled {
// heap_live changed.
traceHeapAlloc()
}
if gcBlackenEnabled != 0 {
- // heap_live changed.
+ // heap_live and heap_scan changed.
gcController.revise()
}
}
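For context on the accounting in refill above: heap_live is charged for the whole span minus the bytes already in use, on the assumption that every remaining slot will be handed out from the mcache. A minimal standalone sketch of that arithmetic (the span parameters below are made up for illustration and are not runtime values):

	package main

	import "fmt"

	func main() {
		const pageSize = 8192 // assumed page size for this example

		// Hypothetical span: 1 page of 128-byte objects (64 slots), 10 already in use.
		var npages, elemsize, nelems, allocCount uintptr = 1, 128, 64, 10

		// Charged to the per-P small-alloc count: the slots assumed to be allocated.
		assumedAllocs := nelems - allocCount // 54

		// Charged to heap_live: the span's bytes minus what is already live.
		usedBytes := allocCount * elemsize
		delta := int64(npages*pageSize) - int64(usedBytes) // 8192 - 1280 = 6912

		fmt.Println(assumedAllocs, delta)
	}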
func (c *mcache) releaseAll() {
+ // Take this opportunity to flush local_scan.
+ atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
+ c.local_scan = 0
+
sg := mheap_.sweepgen
for i := range c.alloc {
s := c.alloc[i]
// Clear tinyalloc pool.
c.tiny = 0
c.tinyoffset = 0
+
+ // Updated heap_scan and possibly heap_live.
+ if gcBlackenEnabled != 0 {
+ gcController.revise()
+ }
}
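The same flush-then-revise pattern recurs at each site touched here (refill, releaseAll, and mark termination): publish the local scan counter atomically, zero it, and only then ask the pacer to recompute. A condensed sketch of that pattern in isolation, using stand-in names (globalHeapScan, cache, flushScan, pacingActive, and revise are placeholders, not runtime identifiers):

	package main

	import "sync/atomic"

	// globalHeapScan stands in for memstats.heap_scan.
	var globalHeapScan uint64

	// cache stands in for an mcache with a local scan-work counter.
	type cache struct {
		localScan uintptr
	}

	// flushScan publishes the local counter with an atomic add, zeroes it,
	// and asks the pacer to recompute only if pacing is currently active.
	func (c *cache) flushScan(pacingActive bool, revise func()) {
		atomic.AddUint64(&globalHeapScan, uint64(c.localScan))
		c.localScan = 0
		if pacingActive {
			revise() // heap_scan changed.
		}
	}

	func main() {
		c := &cache{localScan: 4096}
		c.flushScan(true, func() { /* recompute assist and trigger ratios */ })
	}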
// prepareForSweep flushes c if the system has entered a new sweep phase
gcw.dispose()
}
- cachestats()
-
// Update the marked heap stat.
memstats.heap_marked = work.bytesMarked
+ // Flush local_scan from each mcache since we're about to modify
+ // heap_scan directly. If we were to flush this later, then local_scan
+ // might have incorrect information.
+ for _, p := range allp {
+ c := p.mcache
+ if c == nil {
+ continue
+ }
+ memstats.heap_scan += uint64(c.local_scan)
+ c.local_scan = 0
+ }
+
- // Update other GC heap size stats. This must happen after
- // cachestats (which flushes local statistics to these) and
- // flushallmcaches (which modifies heap_live).
+ // Update other GC heap size stats. This must happen after
+ // the local_scan flush above and after flushallmcaches
+ // (which modifies heap_live).
base, scav = c.alloc(npages)
if base != 0 {
s = h.tryAllocMSpan()
-
- if s != nil && gcBlackenEnabled == 0 && (manual || spanclass.sizeclass() != 0) {
+ if s != nil {
goto HaveSpan
}
- // We're either running duing GC, failed to acquire a mspan,
- // or the allocation is for a large object. This means we
- // have to lock the heap and do a bunch of extra work,
- // so go down the HaveBaseLocked path.
- //
- // We must do this during GC to avoid skew with heap_scan
- // since we flush mcache stats whenever we lock.
- //
- // TODO(mknyszek): It would be nice to not have to
- // lock the heap if it's a large allocation, but
- // it's fine for now. The critical section here is
- // short and large object allocations are relatively
- // infrequent.
+ // We have a base but no mspan, so we need
+ // to lock the heap.
}
}
// one now that we have the heap lock.
s = h.allocMSpanLocked()
}
- if !manual {
- // This is a heap span, so we should do some additional accounting
- // which may only be done with the heap locked.
-
- // Transfer stats from mcache to global.
- var c *mcache
- if gp.m.p != 0 {
- c = gp.m.p.ptr().mcache
- } else {
- // This case occurs while bootstrapping.
- // See the similar code in mallocgc.
- c = mcache0
- if c == nil {
- throw("mheap.allocSpan called with no P")
- }
- }
- atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
- c.local_scan = 0
-
- // heap_scan was been updated.
- if gcBlackenEnabled != 0 {
- gcController.revise()
- }
- }
unlock(&h.lock)
HaveSpan:
// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan) {
systemstack(func() {
- c := getg().m.p.ptr().mcache
lock(&h.lock)
- atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
- c.local_scan = 0
if msanenabled {
// Tell msan that this entire span is no longer in use.
base := unsafe.Pointer(s.base())
bytes := s.npages << _PageShift
msanfree(base, bytes)
}
- if gcBlackenEnabled != 0 {
- // heap_scan changed.
- gcController.revise()
- }
h.freeSpanLocked(s, true, true)
unlock(&h.lock)
})
memstats.by_size[i].nfree = 0
}
- // Aggregate local stats.
- cachestats()
-
// Collect allocation stats. This is safe and consistent
// because the world is stopped.
var smallFree, totalAlloc, totalFree uint64
memstats.heap_objects = memstats.nmalloc - memstats.nfree
}
-// cachestats flushes all mcache stats.
-//
-// The world must be stopped.
-//
-//go:nowritebarrier
-func cachestats() {
- for _, p := range allp {
- c := p.mcache
- if c == nil {
- continue
- }
- purgecachedstats(c)
- }
-}
-
// flushmcache flushes the mcache of allp[i].
//
// The world must be stopped.
}
}
-//go:nosplit
-func purgecachedstats(c *mcache) {
- // Protected by heap lock.
- atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
- c.local_scan = 0
-}
-
// Atomically increases a given *system* memory stat. We are counting on this
// stat never overflowing a uintptr, so this function must only be used for
// system memory stats.