EPOLL_CTL_DEL = 0x2
EPOLL_CTL_MOD = 0x3
EFD_CLOEXEC = 0x80000
+
+ PR_SET_VMA = 0x53564d41
+ PR_SET_VMA_ANON_NAME = 0
)
const (
SYS_FCNTL = 55
SYS_MPROTECT = 125
+ SYS_PRCTL = 172
SYS_EPOLL_CTL = 255
SYS_EPOLL_PWAIT = 319
SYS_EPOLL_CREATE1 = 329
const (
SYS_MPROTECT = 10
SYS_FCNTL = 72
+ SYS_PRCTL = 157
SYS_EPOLL_CTL = 233
SYS_EPOLL_PWAIT = 281
SYS_EPOLL_CREATE1 = 291
const (
SYS_FCNTL = 55
SYS_MPROTECT = 125
+ SYS_PRCTL = 172
SYS_EPOLL_CTL = 251
SYS_EPOLL_PWAIT = 346
SYS_EPOLL_CREATE1 = 357
SYS_EPOLL_CTL = 21
SYS_EPOLL_PWAIT = 22
SYS_FCNTL = 25
+ SYS_PRCTL = 167
SYS_MPROTECT = 226
SYS_EPOLL_PWAIT2 = 441
SYS_EVENTFD2 = 19
SYS_EPOLL_CTL = 21
SYS_EPOLL_PWAIT = 22
SYS_FCNTL = 25
+ SYS_PRCTL = 167
SYS_MPROTECT = 226
SYS_EPOLL_PWAIT2 = 441
SYS_EVENTFD2 = 19
const (
SYS_MPROTECT = 5010
SYS_FCNTL = 5070
+ SYS_PRCTL = 5153
SYS_EPOLL_CTL = 5208
SYS_EPOLL_PWAIT = 5272
SYS_EPOLL_CREATE1 = 5285
const (
SYS_FCNTL = 4055
SYS_MPROTECT = 4125
+ SYS_PRCTL = 4192
SYS_EPOLL_CTL = 4249
SYS_EPOLL_PWAIT = 4313
SYS_EPOLL_CREATE1 = 4326
const (
SYS_FCNTL = 55
SYS_MPROTECT = 125
+ SYS_PRCTL = 171
SYS_EPOLL_CTL = 237
SYS_EPOLL_PWAIT = 303
SYS_EPOLL_CREATE1 = 315
SYS_EPOLL_CTL = 21
SYS_EPOLL_PWAIT = 22
SYS_FCNTL = 25
+ SYS_PRCTL = 167
SYS_MPROTECT = 226
SYS_EPOLL_PWAIT2 = 441
SYS_EVENTFD2 = 19
const (
SYS_FCNTL = 55
SYS_MPROTECT = 125
+ SYS_PRCTL = 172
SYS_EPOLL_CTL = 250
SYS_EPOLL_PWAIT = 312
SYS_EPOLL_CREATE1 = 327
//
// Unlike (*mheap).grow, just map in everything that we
// asked for. We're likely going to use it all.
- sysMap(unsafe.Pointer(base), userArenaChunkBytes, &gcController.heapReleased)
+ sysMap(unsafe.Pointer(base), userArenaChunkBytes, &gcController.heapReleased, "user arena chunk")
sysUsed(unsafe.Pointer(base), userArenaChunkBytes, userArenaChunkBytes)
// Model the user arena as a heap span for a large object.
if l == nil {
// Use sysAllocOS instead of sysAlloc because we want to interfere
// with the runtime as little as possible, and sysAlloc updates accounting.
- l = (*dloggerImpl)(sysAllocOS(unsafe.Sizeof(dloggerImpl{})))
+ l = (*dloggerImpl)(sysAllocOS(unsafe.Sizeof(dloggerImpl{}), "debug log"))
if l == nil {
throw("failed to allocate debug log")
}
}
// Use sysAllocOS instead of sysAlloc because we want to interfere
// with the runtime as little as possible, and sysAlloc updates accounting.
- state1 := sysAllocOS(unsafe.Sizeof(readState{}) * uintptr(n))
+ state1 := sysAllocOS(unsafe.Sizeof(readState{})*uintptr(n), "debug log")
if state1 == nil {
println("failed to allocate read state for", n, "logs")
printunlock()
} else {
start, end = addr, addr+heapArenaBytes
}
- got := sysReserve(unsafe.Pointer(addr), physPageSize)
+ got := sysReserve(unsafe.Pointer(addr), physPageSize, "")
ok = (addr == uintptr(got))
if !ok {
// We were unable to get the requested reservation.
sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
}
n := nptr/8 + 1
- p := sysAlloc(n, &memstats.other_sys)
+ p := sysAlloc(n, &memstats.other_sys, "heapdump")
if p == nil {
throw("heapdump: out of memory")
}
// heap reservation.
const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
- meta := uintptr(sysReserve(nil, arenaMetaSize))
+ meta := uintptr(sysReserve(nil, arenaMetaSize, "heap reservation"))
if meta != 0 {
mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
}
128 << 20,
}
for _, arenaSize := range arenaSizes {
- a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
+ a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes, "heap reservation")
if a != nil {
mheap_.arena.init(uintptr(a), size, false)
p = mheap_.arena.end // For hint below
//
// Only do this if we're using the regular heap arena hints.
// This behavior is only for the heap.
- v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased)
+ v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased, "heap")
if v != nil {
size = n
goto mapped
// Outside addressable heap. Can't use.
v = nil
} else {
- v = sysReserve(unsafe.Pointer(p), n)
+ v = sysReserve(unsafe.Pointer(p), n, "heap reservation")
}
if p == uintptr(v) {
// Success. Update the hint.
// All of the hints failed, so we'll take any
// (sufficiently aligned) address the kernel will give
// us.
- v, size = sysReserveAligned(nil, n, heapArenaBytes)
+ v, size = sysReserveAligned(nil, n, heapArenaBytes, "heap")
if v == nil {
return nil, 0
}
// is paged in is too expensive. Trying to account for the whole region means
// that it will appear like an enormous memory overhead in statistics, even though
// it is not.
- l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
+ l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2), "heap index"))
if l2 == nil {
throw("out of memory allocating heap arena map")
}
throw("arena already initialized")
}
var r *heapArena
- r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
+ r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys, "heap metadata"))
if r == nil {
r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
if r == nil {
// sysReserveAligned is like sysReserve, but the returned pointer is
// aligned to align bytes. It may reserve either n or n+align bytes,
// so it returns the size that was reserved.
-func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
+func sysReserveAligned(v unsafe.Pointer, size, align uintptr, vmaName string) (unsafe.Pointer, uintptr) {
if isSbrkPlatform {
if v != nil {
throw("unexpected heap arena hint on sbrk platform")
// for a larger region and remove the parts we don't need.
retries := 0
retry:
- p := uintptr(sysReserve(v, size+align))
+ p := uintptr(sysReserve(v, size+align, vmaName))
switch {
case p == 0:
return nil, 0
// so we may have to try again.
sysFreeOS(unsafe.Pointer(p), size+align)
p = alignUp(p, align)
- p2 := sysReserve(unsafe.Pointer(p), size)
+ p2 := sysReserve(unsafe.Pointer(p), size, vmaName)
if p != uintptr(p2) {
// Must have raced. Try again.
sysFreeOS(p2, size)
}
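
Reserving size+align bytes always leaves room for an align-aligned block of size bytes, since alignUp(p, align) is at most p+align-1. A minimal sketch of that arithmetic, restating the runtime's alignUp helper (not part of this change):

// alignUp rounds n up to a multiple of a, which must be a power of two
// (same definition as runtime.alignUp).
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// For any p returned by sysReserve(v, size+align):
//   aligned := alignUp(p, align)   // aligned <= p + align - 1
//   aligned + size <= p + size + align - 1
// so [aligned, aligned+size) fits entirely inside [p, p+size+align).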
if size >= maxBlock {
- return (*notInHeap)(sysAlloc(size, sysStat))
+ return (*notInHeap)(sysAlloc(size, sysStat, "immortal metadata"))
}
mp := acquirem()
}
persistent.off = alignUp(persistent.off, align)
if persistent.off+size > persistentChunkSize || persistent.base == nil {
- persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
+ persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys, "immortal metadata"))
if persistent.base == nil {
if persistent == &globalAlloc.persistentAlloc {
unlock(&globalAlloc.mutex)
l.mapMemory = mapMemory
}
-func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
+func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer {
p := alignUp(l.next, align)
if p+size > l.end {
return nil
if l.mapMemory {
// Transition from Reserved to Prepared to Ready.
n := pEnd - l.mapped
- sysMap(unsafe.Pointer(l.mapped), n, sysStat)
+ sysMap(unsafe.Pointer(l.mapped), n, sysStat, vmaName)
sysUsed(unsafe.Pointer(l.mapped), n, n)
}
l.mapped = pEnd
// which prevents us from allocating more stack.
//
//go:nosplit
-func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer {
sysStat.add(int64(n))
gcController.mappedReady.Add(int64(n))
- return sysAllocOS(n)
+ return sysAllocOS(n, vmaName)
}
// sysUnused transitions a memory region from Ready to Prepared. It notifies the
// NOTE: sysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysReserve.
-func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
- return sysReserveOS(v, n)
+func sysReserve(v unsafe.Pointer, n uintptr, vmaName string) unsafe.Pointer {
+ return sysReserveOS(v, n, vmaName)
}
// sysMap transitions a memory region from Reserved to Prepared. It ensures the
// memory region can be efficiently transitioned to Ready.
//
// sysStat must be non-nil.
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat, vmaName string) {
sysStat.add(int64(n))
- sysMapOS(v, n)
+ sysMapOS(v, n, vmaName)
}
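
Together, sysReserve, sysMap, and sysUsed implement the memory-state transitions described in these doc comments (Reserved, then Prepared, then Ready), and this change threads a VMA name through the transitions that create mappings. A minimal sketch of the usual sequence for a region of n bytes; "example region" is an illustrative name, not one used by the runtime:

// Sketch only: the Reserved -> Prepared -> Ready sequence with the new
// vmaName argument.
v := sysReserve(nil, n, "example region")           // reserve address space (Reserved)
sysMap(v, n, &memstats.other_sys, "example region") // commit the mapping (Prepared)
sysUsed(v, n, n)                                    // account the memory as in use (Ready)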
// prevents us from allocating more stack.
//
//go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if err != 0 {
if err == _EACCES {
mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
}
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if err != 0 {
return nil
return p
}
-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
// AIX does not allow mapping a range that is already mapped.
// So, call mprotect to change permissions.
// Note that sysMap is always called with a non-nil pointer
// which prevents us from allocating more stack.
//
//go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if err != 0 {
return nil
// Indicates not to reserve swap space for the mapping.
const _sunosMAP_NORESERVE = 0x40
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
flags := int32(_MAP_ANON | _MAP_PRIVATE)
if GOOS == "solaris" || GOOS == "illumos" {
// Be explicit that we don't want to reserve swap space
const _sunosEAGAIN = 11
const _ENOMEM = 12
-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
if err == _ENOMEM || ((GOOS == "solaris" || GOOS == "illumos") && err == _sunosEAGAIN) {
throw("runtime: out of memory")
// which prevents us from allocating more stack.
//
//go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if err != 0 {
return nil
mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
}
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if err != 0 {
return nil
const _ENOMEM = 12
-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
if err == _ENOMEM {
throw("runtime: out of memory")
// prevents us from allocating more stack.
//
//go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, vmaName string) unsafe.Pointer {
p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if err != 0 {
if err == _EACCES {
}
return nil
}
+ setVMAName(p, n, vmaName)
return p
}
// Fall back on mmap if it's not supported.
// _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE will unmap all the
// pages in the old mapping, and remap the memory region.
- mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
+ p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
+ if err == 0 && p != nil {
+ setVMAName(p, n, "unused")
+ }
}
if debug.harddecommit > 0 {
if p != v || err != 0 {
throw("runtime: cannot disable permissions in address space")
}
+ setVMAName(p, n, "unused")
}
}
if p != v || err != 0 {
throw("runtime: cannot remap pages in address space")
}
+ setVMAName(p, n, "used")
return
}
}
madvise(v, n, _MADV_DONTNEED)
}
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, vmaName string) unsafe.Pointer {
p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if err != 0 {
return nil
}
+ setVMAName(p, n, vmaName)
return p
}
-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, vmaName string) {
p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
if err == _ENOMEM {
throw("runtime: out of memory")
print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n")
throw("runtime: cannot map pages in arena address space")
}
+ setVMAName(p, n, vmaName)
// Disable huge pages if the GODEBUG for it is set.
//
blocMax = bloc
}
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
lock(&memlock)
p := memAlloc(n)
memCheck()
func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) {
}
-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
}
func sysFaultOS(v unsafe.Pointer, n uintptr) {
}
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
lock(&memlock)
var p unsafe.Pointer
if uintptr(v) == bloc {
// which prevents us from allocating more stack.
//
//go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
}
sysUnusedOS(v, n)
}
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
// v is just a hint.
// First try at v.
// This will fail if any of [v, v+n) is already reserved.
return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
}
-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
}
}
var new []*mspan
sp := (*slice)(unsafe.Pointer(&new))
- sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys)
+ sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys, "allspans array")
if sp.array == nil {
throw("runtime: cannot allocate memory")
}
// Transition this space from Reserved to Prepared and mark it
// as released since we'll be able to start using it after updating
// the page allocator and releasing the lock at any time.
- sysMap(unsafe.Pointer(h.curArena.base), size, &gcController.heapReleased)
+ sysMap(unsafe.Pointer(h.curArena.base), size, &gcController.heapReleased, "heap")
// Update stats.
stats := memstats.heapStats.acquire()
atomic.Xaddint64(&stats.released, int64(size))
// The allocation is always aligned to the heap arena
// size which is always > physPageSize, so it's safe to
// just add directly to heapReleased.
- sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased)
+ sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased, "heap")
// The memory just allocated counts as both released
// and idle, even though it's not yet backed by spans.
var result *gcBitsArena
if gcBitsArenas.free == nil {
unlock(&gcBitsArenas.lock)
- result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys))
+ result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys, "gc bits"))
if result == nil {
throw("runtime: cannot allocate memory")
}
// there should this change.
pallocChunksL2Bits = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
pallocChunksL1Shift = pallocChunksL2Bits
+
+ vmaNamePageAllocIndex = "page alloc index"
)
// maxSearchAddr returns the maximum searchAddr value, which indicates
if p.chunks[c.l1()] == nil {
// Create the necessary l2 entry.
const l2Size = unsafe.Sizeof(*p.chunks[0])
- r := sysAlloc(l2Size, p.sysStat)
+ r := sysAlloc(l2Size, p.sysStat, vmaNamePageAllocIndex)
if r == nil {
throw("pageAlloc: out of memory")
}
totalSize = alignUp(totalSize, physPageSize)
// Reserve memory for all levels in one go. There shouldn't be much for 32-bit.
- reservation := sysReserve(nil, totalSize)
+ reservation := sysReserve(nil, totalSize, "page summary")
if reservation == nil {
throw("failed to reserve page summary memory")
}
// There isn't much. Just map it and mark it as used immediately.
- sysMap(reservation, totalSize, p.sysStat)
+ sysMap(reservation, totalSize, p.sysStat, "page summary")
sysUsed(reservation, totalSize, totalSize)
p.summaryMappedReady += totalSize
if test {
// Set up the scavenge index via sysAlloc so the test can free it later.
scavIndexSize := uintptr(len(scavengeIndexArray)) * unsafe.Sizeof(atomicScavChunkData{})
- s.chunks = ((*[(1 << heapAddrBits) / pallocChunkBytes]atomicScavChunkData)(sysAlloc(scavIndexSize, sysStat)))[:]
+ s.chunks = ((*[(1 << heapAddrBits) / pallocChunkBytes]atomicScavChunkData)(sysAlloc(scavIndexSize, sysStat, vmaNamePageAllocIndex)))[:]
mappedReady = scavIndexSize
} else {
// Set up the scavenge index.
// Reserve b bytes of memory anywhere in the address space.
b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
- r := sysReserve(nil, b)
+ r := sysReserve(nil, b, "page summary")
if r == nil {
throw("failed to reserve page summary memory")
}
}
// Map and commit need.
- sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
+ sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat, "page alloc")
sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
p.summaryMappedReady += need.size()
}
// If we've got something to map, map it, and update the slice bounds.
if need.size() != 0 {
- sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat)
+ sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat, "scavenge index")
sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
// Update the indices only after the new memory is valid.
if haveMax == 0 || needMin < haveMin {
func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) uintptr {
n := uintptr(1<<heapAddrBits) / pallocChunkBytes
nbytes := n * unsafe.Sizeof(atomicScavChunkData{})
- r := sysReserve(nil, nbytes)
+ r := sysReserve(nil, nbytes, "scavenge index")
sl := notInHeapSlice{(*notInHeap)(r), int(n), int(n)}
s.chunks = *(*[]atomicScavChunkData)(unsafe.Pointer(&sl))
return 0 // All memory above is mapped Reserved.
// check again under the lock
bh = (*buckhashArray)(buckhash.Load())
if bh == nil {
- bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
+ bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys, "profiler hash buckets"))
if bh == nil {
throw("runtime: cannot allocate memory")
}
//
//go:nosplit
func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
- stack := sysAlloc(stacksize, &memstats.stacks_sys)
+ stack := sysAlloc(stacksize, &memstats.stacks_sys, "OS thread stack")
if stack == nil {
writeErrStr(failallocatestack)
exit(1)
//
//go:nosplit
func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
- stack := sysAlloc(stacksize, &memstats.stacks_sys)
+ stack := sysAlloc(stacksize, &memstats.stacks_sys, "OS thread stack")
if stack == nil {
writeErrStr(failallocatestack)
exit(1)
--- /dev/null
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+
+package runtime
+
+import (
+ "internal/runtime/atomic"
+ "internal/runtime/syscall"
+ "unsafe"
+)
+
+// prSetVMAUnsupported records that the kernel rejected PR_SET_VMA_ANON_NAME,
+// so later calls can return early instead of retrying.
+var prSetVMAUnsupported atomic.Bool
+
+// setVMAName calls prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, len, name)
+func setVMAName(start unsafe.Pointer, length uintptr, name string) {
+ if prSetVMAUnsupported.Load() {
+ return
+ }
+
+ // The kernel limits anonymous VMA names to 80 bytes including the
+ // trailing NUL, so build the name in a fixed-size, NUL-terminated buffer.
+ var sysName [80]byte
+ n := copy(sysName[:], " Go: ")
+ copy(sysName[n:79], name) // leave the final byte zero
+
+ _, _, err := syscall.Syscall6(syscall.SYS_PRCTL, syscall.PR_SET_VMA, syscall.PR_SET_VMA_ANON_NAME, uintptr(start), length, uintptr(unsafe.Pointer(&sysName[0])), 0)
+ if err == _EINVAL {
+ // EINVAL means the kernel does not support PR_SET_VMA_ANON_NAME
+ // (pre-5.17, or built without CONFIG_ANON_VMA_NAME). Don't try again.
+ prSetVMAUnsupported.Store(true)
+ }
+ // Ignore other errors.
+}
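
For context, PR_SET_VMA_ANON_NAME (Linux 5.17+ with CONFIG_ANON_VMA_NAME) makes an anonymous mapping show up in /proc/<pid>/maps as "[anon:<name>]". A minimal stand-alone sketch of the same prctl call from ordinary user code, assuming golang.org/x/sys/unix; the constants are spelled out locally and the program, name, and sizes are illustrative only, not part of this change:

package main

import (
	"fmt"
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

const (
	prSetVMA         = 0x53564d41 // PR_SET_VMA (same value the runtime defines above)
	prSetVMAAnonName = 0          // PR_SET_VMA_ANON_NAME
)

func main() {
	const size = 1 << 20
	mem, err := unix.Mmap(-1, 0, size, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_ANON|unix.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	name := []byte("demo buffer\x00") // the kernel expects a NUL-terminated name
	if _, _, errno := unix.Syscall6(unix.SYS_PRCTL, prSetVMA, prSetVMAAnonName,
		uintptr(unsafe.Pointer(&mem[0])), size,
		uintptr(unsafe.Pointer(&name[0])), 0); errno != 0 {
		fmt.Fprintln(os.Stderr, "PR_SET_VMA_ANON_NAME not supported:", errno)
	}
	// On supporting kernels the region now appears in /proc/self/maps
	// as "[anon:demo buffer]".
	maps, _ := os.ReadFile("/proc/self/maps")
	os.Stdout.Write(maps)
}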
--- /dev/null
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux
+
+package runtime
+
+import "unsafe"
+
+// setVMAName is a no-op on non-Linux platforms.
+func setVMAName(start unsafe.Pointer, length uintptr, name string) {}
if debug.efence != 0 || stackFromSystem != 0 {
n = uint32(alignUp(uintptr(n), physPageSize))
- v := sysAlloc(uintptr(n), &memstats.stacks_sys)
+ v := sysAlloc(uintptr(n), &memstats.stacks_sys, "goroutine stack (system)")
if v == nil {
throw("out of memory (stackalloc)")
}
unlock(&trace.lock)
} else {
unlock(&trace.lock)
- w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
+ w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys, "trace buffer"))
if w.traceBuf == nil {
throw("trace: out of memory")
}
}
// Allocate a new block.
- block = (*traceRegionAllocBlock)(sysAlloc(unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys))
+ block = (*traceRegionAllocBlock)(sysAlloc(unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys, "trace arena alloc"))
if block == nil {
throw("traceRegion: out of memory")
}
unlock(&vgetrandomAlloc.statesLock)
return 0
}
+ setVMAName(p, allocSize, "getrandom states")
newBlock := uintptr(p)
if vgetrandomAlloc.states == nil {
vgetrandomAlloc.states = make([]uintptr, 0, num)