s.limit = 0 // prevent mlookup from finding this span
sysFault(unsafe.Pointer(s.base()), size)
} else {
- mheap_.freeSpan(s, true)
+ mheap_.freeSpan(s)
}
c.local_nlargefree++
c.local_largefree += size
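
In the sweeper's large-span path shown above, the faulting branch is the debug.efence mode: s.limit is zeroed so mlookup cannot find the span, and the pages are faulted with sysFault so any stale pointer into the freed object crashes immediately. Otherwise the span goes back to the heap, now without the redundant flag. That mode is reachable through the documented GODEBUG setting (binary name illustrative):

    GODEBUG=efence=1 ./prog
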
// update stats, sweep lists
h.pagesInUse += uint64(npage)
if large {
- memstats.heap_objects++
mheap_.largealloc += uint64(s.elemsize)
mheap_.nlargealloc++
atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
}
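
The removed memstats.heap_objects++ here, and the matching decrement in freeSpan below, were the only accounting keyed on the large flag. A minimal standalone sketch of the model the change moves to (hypothetical names, not runtime code): keep monotonic allocation and free counters and derive the live-object count only when stats are read, instead of maintaining it on the hot paths.

    package main

    import "fmt"

    // stats mirrors the idea, not the runtime's types: nmalloc and
    // nfree only ever grow, so no alloc or free path has to keep a
    // live-object counter consistent.
    type stats struct {
        nmalloc uint64 // cumulative allocations
        nfree   uint64 // cumulative frees
    }

    // liveObjects derives the count on demand, the way the new
    // comment in mstats.go says updatememstats now computes
    // heap_objects.
    func (s *stats) liveObjects() uint64 { return s.nmalloc - s.nfree }

    func main() {
        var s stats
        s.nmalloc += 3 // three allocations
        s.nfree++      // one free
        fmt.Println(s.liveObjects()) // prints 2
    }
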
// Free the span back into the heap.
-//
-// large must match the value of large passed to mheap.alloc. This is
-// used for accounting.
-func (h *mheap) freeSpan(s *mspan, large bool) {
+func (h *mheap) freeSpan(s *mspan) {
systemstack(func() {
mp := getg().m
lock(&h.lock)
bytes := s.npages << _PageShift
msanfree(base, bytes)
}
- if large {
- // Match accounting done in mheap.alloc.
- memstats.heap_objects--
- }
if gcBlackenEnabled != 0 {
// heap_scan changed.
gcController.revise()
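
That decrement was the only thing freeSpan did with its large flag, which is why the signature loses it. The field itself survives: the hunk below, in the mstats struct in mstats.go, keeps heap_objects because it still feeds the exported MemStats.HeapObjects, and documents that it is now filled in only when a stats snapshot is taken. Paraphrased (the updatememstats side is not part of the hunks shown here), the derivation amounts to:

    memstats.heap_objects = memstats.nmalloc - memstats.nfree
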
heap_idle uint64 // bytes in idle spans
heap_inuse uint64 // bytes in mSpanInUse spans
heap_released uint64 // bytes released to the os
- heap_objects uint64 // total number of allocated objects
+
+ // heap_objects is not used by the runtime directly; it is instead
+ // computed on the fly by updatememstats.
+ heap_objects uint64 // total number of allocated objects
// Statistics about allocation of low-level fixed-size structures.
// Protected by FixAlloc locks.
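
None of this is visible outside the runtime. MemStats documents that the number of live objects is Mallocs - Frees, and HeapObjects reports exactly that count; ReadMemStats brings the stats up to date, which is when the derivation runs. A quick check using only documented API:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        var m runtime.MemStats
        runtime.ReadMemStats(&m) // snapshots stats; derived fields are computed here
        // Documented invariant: live objects == Mallocs - Frees.
        fmt.Println(m.HeapObjects, m.Mallocs-m.Frees)
    }
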