dataSize = unsafe.Sizeof(_defer{})
}
heapBitsSetType(uintptr(x), size, dataSize, typ)
+ if dataSize > typ.size {
+ // Array allocation. If there are any
+ // pointers, GC has to scan to the last
+ // element.
+ if typ.ptrdata != 0 {
+ c.local_scan += dataSize - typ.size + typ.ptrdata
+ }
+ } else {
+ c.local_scan += typ.ptrdata
+ }
}
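
To spell out the arithmetic in the hunk above: dataSize exceeds typ.size only for array allocations, where every leading element must be scanned in full and the final element only up to its last pointer word (typ.ptrdata). A minimal sketch of that calculation, using hypothetical sizes rather than real runtime values:

package main

import "fmt"

// scannableBytes mirrors the accounting added above: for an array allocation
// (dataSize > elemSize) the scannable portion is all leading elements in full
// plus only the pointer-bearing prefix of the last one; for a single object it
// is just ptrdata. The function and its inputs are illustrative, not runtime APIs.
func scannableBytes(dataSize, elemSize, ptrdata uintptr) uintptr {
	if ptrdata == 0 {
		return 0 // no pointers anywhere: nothing for the collector to scan
	}
	if dataSize > elemSize {
		return dataSize - elemSize + ptrdata
	}
	return ptrdata
}

func main() {
	// An array of four 32-byte elements whose pointers all sit in the first
	// 16 bytes of each element: 3*32 + 16 = 112 scannable bytes.
	fmt.Println(scannableBytes(128, 32, 16)) // prints 112
}
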
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
next_sample int32 // trigger heap sample after allocating this many bytes
local_cachealloc uintptr // bytes allocated from cache since last lock of heap
+ local_scan uintptr // bytes of scannable heap allocated
// Allocator cache for tiny objects w/o pointers.
// See "Tiny allocator" comment in malloc.go.
tiny unsafe.Pointer
// Update other GC heap size stats.
memstats.heap_live = work.bytesMarked
memstats.heap_marked = work.bytesMarked
+ memstats.heap_scan = uint64(gcController.scanWork)
if trace.enabled {
traceHeapAlloc()
// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
+//
+// This is used to scan non-heap roots, so it does not update
+// gcw.bytesMarked or gcw.scanWork.
+//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
// Use local copies of original parameters, so that a stack trace
i += ptrSize
}
}
-
- gcw.scanWork += int64(n)
}
// scanobject scans the object starting at b, adding pointers to gcw.
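
For illustration of the scanblock comment above: an explicit pointer bitmap carries one bit per pointer-sized word, and only words whose bit is set are followed. A self-contained toy sketch (scanWithBitmap and visit are made-up names, not the runtime's API), which likewise does no scan-work accounting:

package main

import "fmt"

// scanWithBitmap is a toy analogue of a ptrmask-driven scan: bit i of ptrmask
// says whether word i may hold a pointer. Words with a clear bit are skipped
// outright, and nothing here feeds into mark or scan-work statistics.
func scanWithBitmap(words []uintptr, ptrmask []byte, visit func(uintptr)) {
	for i, w := range words {
		bit := (ptrmask[i/8] >> (uint(i) % 8)) & 1
		if bit != 0 && w != 0 {
			visit(w) // treat w as a pointer; a real scan would mark/queue its target
		}
	}
}

func main() {
	words := []uintptr{0x1000, 42, 0x2000, 7}
	ptrmask := []byte{0x05} // binary 0101: words 0 and 2 are pointer words
	scanWithBitmap(words, ptrmask, func(p uintptr) { fmt.Printf("pointer word: %#x\n", p) })
}
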
// transfer stats from cache to global
memstats.heap_live += uint64(_g_.m.mcache.local_cachealloc)
_g_.m.mcache.local_cachealloc = 0
+ memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
+ _g_.m.mcache.local_scan = 0
memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
_g_.m.mcache.local_tinyallocs = 0
lock(&h.lock)
memstats.heap_live += uint64(mp.mcache.local_cachealloc)
mp.mcache.local_cachealloc = 0
+ memstats.heap_scan += uint64(mp.mcache.local_scan)
+ mp.mcache.local_scan = 0
memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
mp.mcache.local_tinyallocs = 0
if acct != 0 {
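
The two hunks above repeat a pattern used throughout the allocator: per-P counters such as local_scan are bumped with no synchronization on the allocation path and only folded into the global memstats at points where the heap lock is taken anyway. A schematic sketch of that flush pattern (all names here are illustrative, not the runtime's):

package main

import "sync"

// cache stands in for the per-P mcache: its counter is updated lock-free on
// the fast path and drained into the global stats under a lock.
type cache struct {
	localScan uintptr // bytes of scannable heap allocated since the last flush
}

// stats stands in for the global memstats, guarded here by a mutex instead of
// the runtime's heap lock.
type stats struct {
	mu       sync.Mutex
	heapScan uint64
}

// flush folds the local counter into the global total and resets it, the same
// shape as the memstats.heap_scan += ...; local_scan = 0 pairs above.
func (s *stats) flush(c *cache) {
	s.mu.Lock()
	s.heapScan += uint64(c.localScan)
	c.localScan = 0
	s.mu.Unlock()
}

func main() {
	c := &cache{localScan: 64}
	g := &stats{}
	g.flush(c)
}

Keeping the counter per-P avoids touching the shared stats on every allocation; the trade-off is that the global heap_scan is only approximately current between flushes.
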
// excludes unmarked objects that have not yet been swept.
heap_live uint64
+ // heap_scan is the number of bytes of "scannable" heap. This
+ // is the live heap (as counted by heap_live), but omitting
+ // no-scan objects and no-scan tails of objects.
+ heap_scan uint64
+
// heap_marked is the number of bytes marked by the previous
// GC. After mark termination, heap_live == heap_marked, but
// unlike heap_live, heap_marked does not change until the
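
To make the "no-scan tail" wording concrete: for a hypothetical type like the one below (64-bit field sizes assumed), heap_live grows by the full object size on each allocation while heap_scan grows only by the bytes up to and including the last pointer field.

package main

import (
	"fmt"
	"unsafe"
)

// T is a made-up example type: only its first field can hold a pointer, so
// its scannable prefix is 8 bytes on a 64-bit platform, while the object as a
// whole is 40 bytes. The trailing [4]int64 is a no-scan tail.
type T struct {
	p   *int     // last (and only) pointer field
	buf [4]int64 // never scanned by the collector
}

func main() {
	fmt.Println(unsafe.Sizeof(T{})) // 40 on 64-bit platforms: counted by heap_live
	// Only the first 8 of those bytes would count toward heap_scan.
}
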
if trace.enabled {
traceHeapAlloc()
}
+ memstats.heap_scan += uint64(c.local_scan)
+ c.local_scan = 0
memstats.tinyallocs += uint64(c.local_tinyallocs)
c.local_tinyallocs = 0
memstats.nlookup += uint64(c.local_nlookup)