// Add.
func NewAddrRanges() AddrRanges {
r := addrRanges{}
- r.init(new(uint64))
+ r.init(new(sysMemStat))
return AddrRanges{r, true}
}
return AddrRanges{addrRanges{
ranges: ranges,
totalBytes: total,
- sysStat: new(uint64),
+ sysStat: new(sysMemStat),
}, false}
}
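A side note on the test constructors above: sysMemStat's zero value is already a valid, zeroed counter, which is why the tests can pass new(sysMemStat) directly instead of wiring up a real memstats field. A minimal sketch (illustrative names, not runtime code):

    package sketch

    type sysMemStat uint64

    // A freshly allocated stat needs no initialization; its zero value is usable.
    var demoStat = new(sysMemStat) // *demoStat == 0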
dumpint(memstats.nmalloc)
dumpint(memstats.nfree)
dumpint(memstats.heap_alloc)
- dumpint(memstats.heap_sys)
+ dumpint(memstats.heap_sys.load())
dumpint(memstats.heap_idle)
dumpint(memstats.heap_inuse)
dumpint(memstats.heap_released)
dumpint(memstats.heap_objects)
dumpint(memstats.stacks_inuse)
- dumpint(memstats.stacks_sys)
+ dumpint(memstats.stacks_sys.load())
dumpint(memstats.mspan_inuse)
- dumpint(memstats.mspan_sys)
+ dumpint(memstats.mspan_sys.load())
dumpint(memstats.mcache_inuse)
- dumpint(memstats.mcache_sys)
- dumpint(memstats.buckhash_sys)
- dumpint(memstats.gc_sys)
- dumpint(memstats.other_sys)
+ dumpint(memstats.mcache_sys.load())
+ dumpint(memstats.buckhash_sys.load())
+ dumpint(memstats.gc_sys.load())
+ dumpint(memstats.other_sys.load())
dumpint(memstats.next_gc)
dumpint(memstats.last_gc_unix)
dumpint(memstats.pause_total_ns)
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types go:notinheap.
-func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
+func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
var p *notInHeap
systemstack(func() {
p = persistentalloc1(size, align, sysStat)
// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
-func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
+func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
const (
maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
)
}
if sysStat != &memstats.other_sys {
- mSysStatInc(sysStat, size)
- mSysStatDec(&memstats.other_sys, size)
+ sysStat.add(int64(size))
+ memstats.other_sys.add(-int64(size))
}
return p
}
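The hunk above is the subtle part of persistentalloc's accounting: its backing chunks are charged to memstats.other_sys when they are mapped, so handing a piece of a chunk to a caller with a more specific stat has to move the bytes rather than count them twice. A minimal sketch of that move, assuming the sysMemStat type introduced later in this change (the helper name moveStat is hypothetical, not a runtime function):

    package sketch

    import "sync/atomic"

    type sysMemStat uint64

    func (s *sysMemStat) add(n int64) { atomic.AddUint64((*uint64)(s), uint64(n)) }

    // moveStat re-attributes size bytes from src to dst without changing the total.
    func moveStat(dst, src *sysMemStat, size uintptr) {
        dst.add(int64(size))
        src.add(-int64(size))
    }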
l.end = base + size
}
-func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
+func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
p := alignUp(l.next, align)
if p+size > l.end {
return nil
// Don't split the stack as this method may be invoked without a valid G, which
// prevents us from allocating more stack.
//go:nosplit
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if err != 0 {
if err == _EACCES {
}
return nil
}
- mSysStatInc(sysStat, n)
+ sysStat.add(int64(n))
return p
}
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(-int64(n))
munmap(v, n)
}
return p
}
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatInc(sysStat, n)
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(int64(n))
// AIX does not allow mapping a range that is already mapped.
// So, call mprotect to change permissions.
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if err != 0 {
return nil
}
- mSysStatInc(sysStat, n)
+ sysStat.add(int64(n))
return v
}
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(-int64(n))
munmap(v, n)
}
const _sunosEAGAIN = 11
const _ENOMEM = 12
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatInc(sysStat, n)
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(int64(n))
p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
if err == _ENOMEM || ((GOOS == "solaris" || GOOS == "illumos") && err == _sunosEAGAIN) {
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if err != 0 {
return nil
}
- mSysStatInc(sysStat, n)
+ sysStat.add(int64(n))
return v
}
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(-int64(n))
munmap(v, n)
}
const _ENOMEM = 12
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatInc(sysStat, n)
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(int64(n))
p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
if err == _ENOMEM {
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
p := sysReserve(nil, n)
sysMap(p, n, sysStat)
return p
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(-int64(n))
}
func sysFault(v unsafe.Pointer, n uintptr) {
// This allows the front-end to replace the old DataView object with a new one.
func resetMemoryDataView()
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatInc(sysStat, n)
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(int64(n))
}
// Don't split the stack as this method may be invoked without a valid G, which
// prevents us from allocating more stack.
//go:nosplit
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if err != 0 {
if err == _EACCES {
}
return nil
}
- mSysStatInc(sysStat, n)
+ sysStat.add(int64(n))
return p
}
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(-int64(n))
munmap(v, n)
}
return p
}
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatInc(sysStat, n)
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(int64(n))
p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
if err == _ENOMEM {
return unsafe.Pointer(bl)
}
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
lock(&memlock)
p := memAlloc(n)
memCheck()
unlock(&memlock)
if p != nil {
- mSysStatInc(sysStat, n)
+ sysStat.add(int64(n))
}
return p
}
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(-int64(n))
lock(&memlock)
if uintptr(v)+n == bloc {
// Address range being freed is at the end of memory,
func sysHugePage(v unsafe.Pointer, n uintptr) {
}
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
// sysReserve has already allocated all heap memory,
// but has not adjusted stats.
- mSysStatInc(sysStat, n)
+ sysStat.add(int64(n))
}
func sysFault(v unsafe.Pointer, n uintptr) {
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
-func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
- mSysStatInc(sysStat, n)
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
+ sysStat.add(int64(n))
return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
}
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatDec(sysStat, n)
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(-int64(n))
r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
if r == 0 {
print("runtime: VirtualFree of ", n, " bytes failed with errno=", getlasterror(), "\n")
return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
}
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
- mSysStatInc(sysStat, n)
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(int64(n))
}
chunk uintptr // use uintptr instead of unsafe.Pointer to avoid write barriers
nchunk uint32
inuse uintptr // in-use bytes now
- stat *uint64
+ stat *sysMemStat
zero bool // zero allocations
}
// Initialize f to allocate objects of the given size,
// using the allocator to obtain chunks of memory.
-func (f *fixalloc) init(size uintptr, first func(arg, p unsafe.Pointer), arg unsafe.Pointer, stat *uint64) {
+func (f *fixalloc) init(size uintptr, first func(arg, p unsafe.Pointer), arg unsafe.Pointer, stat *sysMemStat) {
f.size = size
f.first = first
f.arg = arg
// heapRetained returns an estimate of the current heap RSS.
func heapRetained() uint64 {
- return atomic.Load64(&memstats.heap_sys) - atomic.Load64(&memstats.heap_released)
+ return memstats.heap_sys.load() - atomic.Load64(&memstats.heap_released)
}
// gcPaceScavenger updates the scavenger's pacing, particularly
// Update global accounting only when not in test, otherwise
// the runtime's accounting will be wrong.
- mSysStatInc(&memstats.heap_released, uintptr(npages)*pageSize)
+ atomic.Xadd64(&memstats.heap_released, int64(npages)*pageSize)
return addr
}
// sysUsed all the pages that are actually available
// in the span since some of them might be scavenged.
sysUsed(unsafe.Pointer(base), nbytes)
- mSysStatDec(&memstats.heap_released, scav)
+ atomic.Xadd64(&memstats.heap_released, -int64(scav))
}
// Update stats.
switch typ {
case spanAllocHeap:
- mSysStatInc(&memstats.heap_inuse, nbytes)
+ atomic.Xadd64(&memstats.heap_inuse, int64(nbytes))
case spanAllocStack:
- mSysStatInc(&memstats.stacks_inuse, nbytes)
+ atomic.Xadd64(&memstats.stacks_inuse, int64(nbytes))
case spanAllocPtrScalarBits, spanAllocWorkBuf:
- mSysStatInc(&memstats.gc_sys, nbytes)
+ memstats.gc_sys.add(int64(nbytes))
}
if typ.manual() {
// Manually managed memory doesn't count toward heap_sys.
- mSysStatDec(&memstats.heap_sys, nbytes)
+ memstats.heap_sys.add(-int64(nbytes))
}
- mSysStatDec(&memstats.heap_idle, nbytes)
+ atomic.Xadd64(&memstats.heap_idle, -int64(nbytes))
// Publish the span in various locations.
// The allocation is always aligned to the heap arena
// size, which is always > physPageSize, so it's safe to
// just add directly to heap_released.
- mSysStatInc(&memstats.heap_released, asize)
- mSysStatInc(&memstats.heap_idle, asize)
+ atomic.Xadd64(&memstats.heap_released, int64(asize))
+ atomic.Xadd64(&memstats.heap_idle, int64(asize))
// Recalculate nBase.
// We know this won't overflow, because sysAlloc returned
// Update stats.
//
// Mirrors the code in allocSpan.
+ nbytes := s.npages * pageSize
switch typ {
case spanAllocHeap:
- mSysStatDec(&memstats.heap_inuse, s.npages*pageSize)
+ atomic.Xadd64(&memstats.heap_inuse, -int64(nbytes))
case spanAllocStack:
- mSysStatDec(&memstats.stacks_inuse, s.npages*pageSize)
+ atomic.Xadd64(&memstats.stacks_inuse, -int64(nbytes))
case spanAllocPtrScalarBits, spanAllocWorkBuf:
- mSysStatDec(&memstats.gc_sys, s.npages*pageSize)
+ memstats.gc_sys.add(-int64(nbytes))
}
if typ.manual() {
- mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
+ // Manually managed memory doesn't count toward heap_sys, so add it back.
+ memstats.heap_sys.add(int64(nbytes))
}
- mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
+ atomic.Xadd64(&memstats.heap_idle, int64(nbytes))
// Mark the space as free.
h.pages.free(s.base(), s.npages)
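For context on the two idioms mixed in the alloc/free hunks above: stats converted to sysMemStat in this change (heap_sys, gc_sys, and so on) go through the checked add helper, while stats left as plain uint64 (heap_inuse, heap_idle, heap_released, stacks_inuse) are still bumped with raw atomic.Xadd64. A compilable sketch of the contrast, with illustrative names and sync/atomic standing in for runtime/internal/atomic:

    package sketch

    import "sync/atomic"

    type sysMemStat uint64

    func (s *sysMemStat) add(n int64) {
        // The runtime helper also checks for overflow; omitted here.
        atomic.AddUint64((*uint64)(s), uint64(n))
    }

    type heapStats struct {
        heapSys  sysMemStat // converted stat: use the checked helper
        heapIdle uint64     // unconverted stat: raw 64-bit atomic add
    }

    // freeManualSpan mirrors the manual-span branch of the free path above:
    // the bytes are added back to heap_sys and counted as idle again.
    func freeManualSpan(m *heapStats, nbytes uintptr) {
        m.heapSys.add(int64(nbytes))
        atomic.AddUint64(&m.heapIdle, uint64(nbytes))
    }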
// sysStat is the runtime memstat to update when new system
// memory is committed by the pageAlloc for allocation metadata.
- sysStat *uint64
+ sysStat *sysMemStat
// Whether or not this struct is being used in tests.
test bool
}
-func (p *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
+func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat) {
if levelLogPages[0] > logMaxPackedValue {
// We can't represent 1<<levelLogPages[0] pages, the maximum number
// of pages we need to represent at the root level, in a summary, which
totalBytes uintptr
// sysStat is the stat to track allocations by this type
- sysStat *uint64
+ sysStat *sysMemStat
}
-func (a *addrRanges) init(sysStat *uint64) {
+func (a *addrRanges) init(sysStat *sysMemStat) {
ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
ranges.len = 0
ranges.cap = 16
import (
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
//
// Like MemStats, heap_sys and heap_inuse do not count memory
// in manually-managed spans.
- heap_alloc uint64 // bytes allocated and not yet freed (same as alloc above)
- heap_sys uint64 // virtual address space obtained from system for GC'd heap
- heap_idle uint64 // bytes in idle spans
- heap_inuse uint64 // bytes in mSpanInUse spans
- heap_released uint64 // bytes released to the os
+ heap_alloc uint64 // bytes allocated and not yet freed (same as alloc above)
+ heap_sys sysMemStat // virtual address space obtained from system for GC'd heap
+ heap_idle uint64 // bytes in idle spans
+ heap_inuse uint64 // bytes in mSpanInUse spans
+ heap_released uint64 // bytes released to the os
// heap_objects is not used by the runtime directly and instead
// computed on the fly by updatememstats.
// Statistics about allocation of low-level fixed-size structures.
// Protected by FixAlloc locks.
- stacks_inuse uint64 // bytes in manually-managed stack spans; updated atomically or during STW
- stacks_sys uint64 // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
- mspan_inuse uint64 // mspan structures
- mspan_sys uint64
+ stacks_inuse uint64 // bytes in manually-managed stack spans; updated atomically or during STW
+ stacks_sys sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
+ mspan_inuse uint64 // mspan structures
+ mspan_sys sysMemStat
mcache_inuse uint64 // mcache structures
- mcache_sys uint64
- buckhash_sys uint64 // profiling bucket hash table
- gc_sys uint64 // updated atomically or during STW
- other_sys uint64 // updated atomically or during STW
+ mcache_sys sysMemStat
+ buckhash_sys sysMemStat // profiling bucket hash table
+ gc_sys sysMemStat // updated atomically or during STW
+ other_sys sysMemStat // updated atomically or during STW
// Statistics about the garbage collector.
memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
- memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
- memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys
+ memstats.sys = memstats.heap_sys.load() + memstats.stacks_sys.load() + memstats.mspan_sys.load() +
+ memstats.mcache_sys.load() + memstats.buckhash_sys.load() + memstats.gc_sys.load() +
+ memstats.other_sys.load()
// We also count stacks_inuse as sys memory.
memstats.sys += memstats.stacks_inuse
}
}
-// Atomically increases a given *system* memory stat. We are counting on this
-// stat never overflowing a uintptr, so this function must only be used for
-// system memory stats.
+// sysMemStat represents a global system statistic that is managed atomically.
//
-// The current implementation for little endian architectures is based on
-// xadduintptr(), which is less than ideal: xadd64() should really be used.
-// Using xadduintptr() is a stop-gap solution until arm supports xadd64() that
-// doesn't use locks. (Locks are a problem as they require a valid G, which
-// restricts their useability.)
-//
-// A side-effect of using xadduintptr() is that we need to check for
-// overflow errors.
-//go:nosplit
-func mSysStatInc(sysStat *uint64, n uintptr) {
- if sysStat == nil {
- return
- }
- if sys.BigEndian {
- atomic.Xadd64(sysStat, int64(n))
- return
- }
- if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
- print("runtime: stat overflow: val ", val, ", n ", n, "\n")
- exit(2)
- }
+// This type must structurally be a uint64 so that mstats aligns with MemStats.
+type sysMemStat uint64
+
+// load atomically reads the value of the stat.
+func (s *sysMemStat) load() uint64 {
+ return atomic.Load64((*uint64)(s))
}
-// Atomically decreases a given *system* memory stat. Same comments as
-// mSysStatInc apply.
-//go:nosplit
-func mSysStatDec(sysStat *uint64, n uintptr) {
- if sysStat == nil {
- return
- }
- if sys.BigEndian {
- atomic.Xadd64(sysStat, -int64(n))
+// add atomically adds n to the stat.
+//
+// Must be nosplit because it is called from nosplit functions such as
+// sysAlloc and sysFree above, which may run without a valid G and so
+// cannot grow the stack.
+//go:nosplit
+func (s *sysMemStat) add(n int64) {
+ if s == nil {
return
}
- if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
- print("runtime: stat underflow: val ", val, ", n ", n, "\n")
- exit(2)
+ val := atomic.Xadd64((*uint64)(s), n)
+ if (n > 0 && int64(val) < n) || (n < 0 && int64(val)+n < n) {
+ print("runtime: val=", val, " n=", n, "\n")
+ throw("sysMemStat overflow")
}
}
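As a reference point, here is a standalone, runnable sketch of the same pattern built on the public sync/atomic package (the runtime itself uses runtime/internal/atomic and throw); memStat and the demo values are illustrative only, not runtime identifiers:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type memStat uint64 // stand-in for sysMemStat

    func (s *memStat) load() uint64 {
        return atomic.LoadUint64((*uint64)(s))
    }

    func (s *memStat) add(n int64) {
        // uint64(n) wraps for negative n, which is how sync/atomic subtracts.
        val := atomic.AddUint64((*uint64)(s), uint64(n))
        if (n > 0 && int64(val) < n) || (n < 0 && int64(val)+n < n) {
            panic(fmt.Sprintf("stat overflow: val=%d n=%d", val, n))
        }
    }

    func main() {
        var stacksSys memStat
        stacksSys.add(64 << 10)    // e.g. one 64 KiB thread stack mapped
        stacksSys.add(-(64 << 10)) // and unmapped again
        fmt.Println(stacksSys.load()) // prints 0
    }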
exit(1)
}
mp.g0.stack.hi = stacksize // for mstart
- //mSysStatInc(&memstats.stacks_sys, stacksize) //TODO: do this?
// Tell the pthread library we won't join with this thread.
if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
exit(1)
}
g0.stack.hi = stacksize // for mstart
- mSysStatInc(&memstats.stacks_sys, stacksize)
+ memstats.stacks_sys.add(int64(stacksize))
// Tell the pthread library we won't join with this thread.
if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {