// Add in frees. readmemstats_m flushed the cached stats, so
// these are up-to-date.
- var largeFree, smallFree uint64
+ var tinyAllocs, largeFree, smallFree uint64
for _, p := range allp {
c := p.mcache
if c == nil {
continue
}
// Collect large allocation stats.
largeFree += uint64(c.local_largefree)
slow.Frees += uint64(c.local_nlargefree)
+ // Collect tiny allocation stats.
+ tinyAllocs += uint64(c.local_tinyallocs)
+
// Collect per-sizeclass stats.
for i := 0; i < _NumSizeClasses; i++ {
slow.Frees += uint64(c.local_nsmallfree[i])
smallFree += uint64(c.local_nsmallfree[i]) * uint64(class_to_size[i])
}
}
}
- slow.Frees += memstats.tinyallocs
+ slow.Frees += tinyAllocs
slow.Mallocs += slow.Frees
slow.TotalAlloc = slow.Alloc + largeFree + smallFree
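As a side note, here is a minimal, self-contained sketch of the pattern this hunk adopts: each P's cache is the only writer of its own tiny-allocation counter, so increments need no atomics, and the slow read path sums the shards only while all owners are stopped (stop-the-world in the runtime). The names below (cache, slowStats, readStatsSlow) are hypothetical and not runtime API.

package main

import "fmt"

// cache models a per-P mcache shard; only its owning P writes it.
type cache struct {
	tinyAllocs uint64 // number of tiny allocations performed by the owner
}

type slowStats struct {
	Frees uint64
}

// readStatsSlow sums the per-shard counters. The caller is assumed to have
// quiesced every owner before calling, so plain reads are safe.
func readStatsSlow(allp []*cache) slowStats {
	var slow slowStats
	var tinyAllocs uint64
	for _, c := range allp {
		if c == nil {
			continue
		}
		tinyAllocs += c.tinyAllocs
	}
	slow.Frees += tinyAllocs
	return slow
}

func main() {
	ps := []*cache{{tinyAllocs: 3}, nil, {tinyAllocs: 5}}
	fmt.Println(readStatsSlow(ps).Frees) // prints 8
}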
// tiny is a heap pointer. Since mcache is in non-GC'd memory,
// we handle it by clearing it in releaseAll during mark
// termination.
- tiny uintptr
- tinyoffset uintptr
- local_tinyallocs uintptr // number of tiny allocs not counted in other stats
+ tiny uintptr
+ tinyoffset uintptr
// The rest is not accessed on every malloc.
// When read with stats from other mcaches and with the world
// stopped, the result will accurately reflect the state of the
// application.
+ local_tinyallocs uintptr // number of tiny allocs not counted in other stats
local_largealloc uintptr // bytes allocated for large objects
local_nlargealloc uintptr // number of large object allocations
local_nsmallalloc [_NumSizeClasses]uintptr // number of allocs for small objects
d.local_nsmallfree[i] += c.local_nsmallfree[i]
c.local_nsmallfree[i] = 0
}
+ d.local_tinyallocs += c.local_tinyallocs
+ c.local_tinyallocs = 0
}
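A minimal sketch of the donation step above, under the assumption that it runs once the retiring cache can no longer be written by its owner; the type and function names are hypothetical, not the runtime's. Folding the counts into a surviving shard and zeroing the source keeps every allocation counted exactly once by the later aggregation pass.

package main

import "fmt"

type cache struct {
	tinyAllocs uint64
}

// donate moves c's counters into d and clears c, mirroring the hunk above.
func donate(d, c *cache) {
	d.tinyAllocs += c.tinyAllocs
	c.tinyAllocs = 0
}

func main() {
	survivor := &cache{tinyAllocs: 2}
	retiring := &cache{tinyAllocs: 7}
	donate(survivor, retiring)
	fmt.Println(survivor.tinyAllocs, retiring.tinyAllocs) // prints 9 0
}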
// refill acquires a new span of span class spc for c. This span will
}
atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
c.local_scan = 0
- memstats.tinyallocs += uint64(c.local_tinyallocs)
- c.local_tinyallocs = 0
// heap_scan was updated.
if gcBlackenEnabled != 0 {
lock(&h.lock)
atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
c.local_scan = 0
- memstats.tinyallocs += uint64(c.local_tinyallocs)
- c.local_tinyallocs = 0
if msanenabled {
// Tell msan that this entire span is no longer in use.
base := unsafe.Pointer(s.base())
memstats.total_alloc = 0
memstats.nmalloc = 0
memstats.nfree = 0
+ memstats.tinyallocs = 0
for i := 0; i < len(memstats.by_size); i++ {
memstats.by_size[i].nmalloc = 0
memstats.by_size[i].nfree = 0
totalFree += uint64(c.local_largefree)
memstats.nfree += uint64(c.local_nlargefree)
+ // Collect tiny allocation stats.
+ memstats.tinyallocs += uint64(c.local_tinyallocs)
+
// Collect per-sizeclass stats.
for i := 0; i < _NumSizeClasses; i++ {
// Malloc stats.
// Protected by heap lock.
atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
c.local_scan = 0
- memstats.tinyallocs += uint64(c.local_tinyallocs)
- c.local_tinyallocs = 0
}
// Atomically increases a given *system* memory stat. We are counting on this