import (
"math/bits"
+ "runtime/internal/atomic"
"unsafe"
)
// heapRetained returns an estimate of the current heap RSS.
-//
-// mheap_.lock must be held or the world must be stopped.
func heapRetained() uint64 {
- return memstats.heap_sys - memstats.heap_released
+ return atomic.Load64(&memstats.heap_sys) - atomic.Load64(&memstats.heap_released)
}
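// Note (illustrative, not part of this change): heap_sys and heap_released
// are now only written with atomic adds, so the loads above can run without
// mheap_.lock. The result may be slightly stale, but never a torn read.
// Assumed pairing between writers and this reader:
//
//	mSysStatInc(&memstats.heap_released, n)  // writer: atomic add
//	atomic.Load64(&memstats.heap_released)   // reader: atomic load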
// gcPaceScavenger updates the scavenger's pacing, particularly
// its rate and RSS goal.
// Update global accounting only when not in test, otherwise
// the runtime's accounting will be wrong.
- memstats.heap_released += uint64(npages) * pageSize
+ mSysStatInc(&memstats.heap_released, uintptr(npages)*pageSize)
}
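// The mSysStatInc/mSysStatDec helpers used throughout this change are not
// shown in the diff. Below is a minimal sketch of their intent; the real
// runtime versions also handle 32-bit platforms, endianness, and overflow
// checking, so treat this as an illustration only.
func mSysStatInc(sysStat *uint64, n uintptr) {
	if sysStat == nil {
		return
	}
	// Atomic add so lock-free readers (e.g. heapRetained) never see a
	// torn value.
	atomic.Xadd64(sysStat, int64(n))
}

func mSysStatDec(sysStat *uint64, n uintptr) {
	if sysStat == nil {
		return
	}
	atomic.Xadd64(sysStat, -int64(n))
}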
// fillAligned returns x but with all zeroes in m-aligned
// groups of m bits set to 1 if any bit in the group is non-zero.
s.limit = s.base() + s.npages<<_PageShift
s.state.set(mSpanManual) // Publish the span
// Manually managed memory doesn't count toward heap_sys.
- memstats.heap_sys -= uint64(s.npages << _PageShift)
+ mSysStatDec(&memstats.heap_sys, s.npages*pageSize)
}
// This unlock acts as a release barrier. See mheap.alloc_m.
// sysUsed all the pages that are actually available
// in the span.
sysUsed(unsafe.Pointer(base), npage*pageSize)
- memstats.heap_released -= uint64(scav)
+ mSysStatDec(&memstats.heap_released, scav)
}
s := (*mspan)(h.spanalloc.alloc())
}
h.setSpans(s.base(), npage, s)
- *stat += uint64(npage << _PageShift)
- memstats.heap_idle -= uint64(npage << _PageShift)
+ // Update stats.
+ nbytes := npage * pageSize
+ mSysStatInc(stat, nbytes)
+ mSysStatDec(&memstats.heap_idle, nbytes)
return s
}
// The allocation is always aligned to the heap arena
// size which is always > physPageSize, so it's safe to
// just add directly to heap_released.
- memstats.heap_released += uint64(asize)
- memstats.heap_idle += uint64(asize)
+ mSysStatInc(&memstats.heap_released, asize)
+ mSysStatInc(&memstats.heap_idle, asize)
// Recalculate nBase
nBase = alignUp(h.curArena.base+ask, physPageSize)
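// Sketch of the reasoning above (not part of this change): asize is a
// multiple of the heap arena size, which is itself a power of two larger
// than physPageSize, so the mapped region begins and ends on physical page
// boundaries and asize can be credited to heap_released without the rounding
// the scavenger normally does. alignUp is the runtime's power-of-two
// round-up helper:
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}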
func (h *mheap) freeManual(s *mspan, stat *uint64) {
s.needzero = 1
lock(&h.lock)
- *stat -= uint64(s.npages << _PageShift)
- memstats.heap_sys += uint64(s.npages << _PageShift)
+ mSysStatDec(stat, s.npages*pageSize)
+ mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
h.freeSpanLocked(s, false, true)
unlock(&h.lock)
}
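// Illustrative usage (caller and stat are examples, not part of this change):
// manually managed spans charge their bytes to a caller-provided stat instead
// of heap_sys, and freeManual reverses that transfer:
//
//	s := h.allocManual(npages, &memstats.stacks_inuse) // stacks_inuse += npages*pageSize, heap_sys -= same
//	// ... use the span, e.g. as stack memory ...
//	h.freeManual(s, &memstats.stacks_inuse)            // stacks_inuse -= npages*pageSize, heap_sys += same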
}
if acctinuse {
- memstats.heap_inuse -= uint64(s.npages << _PageShift)
+ mSysStatDec(&memstats.heap_inuse, s.npages*pageSize)
}
if acctidle {
- memstats.heap_idle += uint64(s.npages << _PageShift)
+ mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
}
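// Note (descriptive, not part of this change): freeing a span moves its bytes
// from in-use to idle accounting. Manually managed spans skip the heap_inuse
// side, since they were never counted there; freeManual passes acctinuse=false
// for exactly that reason.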
// Mark the space as free.
nfree uint64 // number of frees
// Statistics about malloc heap.
- // Protected by mheap.lock
+ // Updated atomically, or with the world stopped.
//
// Like MemStats, heap_sys and heap_inuse do not count memory
// in manually-managed spans.
// Statistics about allocation of low-level fixed-size structures.
// Protected by FixAlloc locks.
- stacks_inuse uint64 // bytes in manually-managed stack spans
+ stacks_inuse uint64 // bytes in manually-managed stack spans; updated atomically or during STW
stacks_sys uint64 // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
mspan_inuse uint64 // mspan structures
mspan_sys uint64
mcache_inuse uint64 // mcache structures
mcache_sys uint64
buckhash_sys uint64 // profiling bucket hash table
- gc_sys uint64
- other_sys uint64
+ gc_sys uint64 // updated atomically or during STW
+ other_sys uint64 // updated atomically or during STW
// Statistics about garbage collector.
// Protected by mheap or stopping the world during GC.