From 52eaed66335e90ceb6ad65873889ccca46851ee9 Mon Sep 17 00:00:00 2001
From: =?utf8?q?L=C3=A9na=C3=AFc=20Huard?=
Date: Sat, 1 Feb 2025 14:19:04 +0100
Subject: [PATCH] runtime: decorate anonymous memory mappings
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Leverage the prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...) API to name
the anonymous memory areas. This API was introduced in Linux 5.17 to
decorate the anonymous memory areas shown in /proc/<pid>/maps.

This is already used by glibc. See:
* https://sourceware.org/git/?p=glibc.git;a=blob;f=malloc/malloc.c;h=27dfd1eb907f4615b70c70237c42c552bb4f26a8;hb=HEAD#l2434
* https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/setvmaname.c;h=ea93a5ffbebc9e5a7e32a297138f465724b4725f;hb=HEAD#l63

This can be useful when investigating the memory consumption of a
multi-language program. In a pure Go program, the pprof profiler can be
used to profile memory consumption, but pprof is only aware of what
happens within the Go world. In a multi-language program, it can be
unclear whether suspicious extra memory consumption comes from the Go
part or the native part.

With this change, the following Go program:

	package main

	import (
		"fmt"
		"log"
		"os"
	)

	/*
	#include <stdlib.h>

	void f(void) {
		(void)malloc(1024*1024*1024);
	}
	*/
	import "C"

	func main() {
		C.f()
		data, err := os.ReadFile("/proc/self/maps")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(data))
	}

produces this output:

	$ GLIBC_TUNABLES=glibc.mem.decorate_maps=1 ~/doc/devel/open-source/go/bin/go run .
	00400000-00402000 r--p 00000000 00:21 28451768 /home/lenaic/.cache/go-build/9f/9f25a17baed5a80d03eb080a2ce2a5ff49c17f9a56e28330f0474a2bb74a30a0-d/test_vma_name
	00402000-004a4000 r-xp 00002000 00:21 28451768 /home/lenaic/.cache/go-build/9f/9f25a17baed5a80d03eb080a2ce2a5ff49c17f9a56e28330f0474a2bb74a30a0-d/test_vma_name
	004a4000-00574000 r--p 000a4000 00:21 28451768 /home/lenaic/.cache/go-build/9f/9f25a17baed5a80d03eb080a2ce2a5ff49c17f9a56e28330f0474a2bb74a30a0-d/test_vma_name
	00574000-00575000 r--p 00173000 00:21 28451768 /home/lenaic/.cache/go-build/9f/9f25a17baed5a80d03eb080a2ce2a5ff49c17f9a56e28330f0474a2bb74a30a0-d/test_vma_name
	00575000-00580000 rw-p 00174000 00:21 28451768 /home/lenaic/.cache/go-build/9f/9f25a17baed5a80d03eb080a2ce2a5ff49c17f9a56e28330f0474a2bb74a30a0-d/test_vma_name
	00580000-005a4000 rw-p 00000000 00:00 0
	2e075000-2e096000 rw-p 00000000 00:00 0 [heap]
	c000000000-c000400000 rw-p 00000000 00:00 0 [anon: Go: heap]
	c000400000-c004000000 ---p 00000000 00:00 0 [anon: Go: heap reservation]
	777f40000000-777f40021000 rw-p 00000000 00:00 0 [anon: glibc: malloc arena]
	777f40021000-777f44000000 ---p 00000000 00:00 0
	777f44000000-777f44021000 rw-p 00000000 00:00 0 [anon: glibc: malloc arena]
	777f44021000-777f48000000 ---p 00000000 00:00 0
	777f48000000-777f48021000 rw-p 00000000 00:00 0 [anon: glibc: malloc arena]
	777f48021000-777f4c000000 ---p 00000000 00:00 0
	777f4c000000-777f4c021000 rw-p 00000000 00:00 0 [anon: glibc: malloc arena]
	777f4c021000-777f50000000 ---p 00000000 00:00 0
	777f50000000-777f50021000 rw-p 00000000 00:00 0 [anon: glibc: malloc arena]
	777f50021000-777f54000000 ---p 00000000 00:00 0
	777f55afb000-777f55afc000 ---p 00000000 00:00 0
	777f55afc000-777f562fc000 rw-p 00000000 00:00 0 [anon: glibc: pthread stack: 216378]
	777f562fc000-777f562fd000 ---p 00000000 00:00 0
	777f562fd000-777f56afd000 rw-p 00000000 00:00 0 [anon: glibc: pthread stack: 216377]
	777f56afd000-777f56afe000 ---p 00000000 00:00 0
	777f56afe000-777f572fe000 rw-p 00000000 00:00 0 [anon: glibc: pthread stack: 216376]
	777f572fe000-777f572ff000 ---p 00000000 00:00 0
	777f572ff000-777f57aff000 rw-p 00000000 00:00 0 [anon: glibc: pthread stack: 216375]
	777f57aff000-777f57b00000 ---p 00000000 00:00 0
	777f57b00000-777f58300000 rw-p 00000000 00:00 0 [anon: glibc: pthread stack: 216374]
	777f58300000-777f58400000 rw-p 00000000 00:00 0 [anon: Go: page alloc index]
	777f58400000-777f5a400000 rw-p 00000000 00:00 0 [anon: Go: heap index]
	777f5a400000-777f6a580000 ---p 00000000 00:00 0 [anon: Go: scavenge index]
	777f6a580000-777f6a581000 rw-p 00000000 00:00 0 [anon: Go: scavenge index]
	777f6a581000-777f7a400000 ---p 00000000 00:00 0 [anon: Go: scavenge index]
	777f7a400000-777f8a580000 ---p 00000000 00:00 0 [anon: Go: page summary]
	777f8a580000-777f8a581000 rw-p 00000000 00:00 0 [anon: Go: page alloc]
	777f8a581000-777f9c430000 ---p 00000000 00:00 0 [anon: Go: page summary]
	777f9c430000-777f9c431000 rw-p 00000000 00:00 0 [anon: Go: page alloc]
	777f9c431000-777f9e806000 ---p 00000000 00:00 0 [anon: Go: page summary]
	777f9e806000-777f9e807000 rw-p 00000000 00:00 0 [anon: Go: page alloc]
	777f9e807000-777f9ec00000 ---p 00000000 00:00 0 [anon: Go: page summary]
	777f9ec36000-777f9ecb6000 rw-p 00000000 00:00 0 [anon: Go: immortal metadata]
	777f9ecb6000-777f9ecc6000 rw-p 00000000 00:00 0 [anon: Go: gc bits]
	777f9ecc6000-777f9ecd6000 rw-p 00000000 00:00 0 [anon: Go: allspans array]
	777f9ecd6000-777f9ece7000 rw-p 00000000 00:00 0 [anon: Go: immortal metadata]
	777f9ece7000-777f9ed67000 ---p 00000000 00:00 0 [anon: Go: page summary]
	777f9ed67000-777f9ed68000 rw-p 00000000 00:00 0 [anon: Go: page alloc]
	777f9ed68000-777f9ede7000 ---p 00000000 00:00 0 [anon: Go: page summary]
	777f9ede7000-777f9ee07000 rw-p 00000000 00:00 0 [anon: Go: page alloc]
	777f9ee07000-777f9ee0a000 rw-p 00000000 00:00 0 [anon: glibc: loader malloc]
	777f9ee0a000-777f9ee2e000 r--p 00000000 00:21 48158213 /usr/lib/libc.so.6
	777f9ee2e000-777f9ef9f000 r-xp 00024000 00:21 48158213 /usr/lib/libc.so.6
	777f9ef9f000-777f9efee000 r--p 00195000 00:21 48158213 /usr/lib/libc.so.6
	777f9efee000-777f9eff2000 r--p 001e3000 00:21 48158213 /usr/lib/libc.so.6
	777f9eff2000-777f9eff4000 rw-p 001e7000 00:21 48158213 /usr/lib/libc.so.6
	777f9eff4000-777f9effc000 rw-p 00000000 00:00 0
	777f9effc000-777f9effe000 rw-p 00000000 00:00 0 [anon: glibc: loader malloc]
	777f9f00a000-777f9f04a000 rw-p 00000000 00:00 0 [anon: Go: immortal metadata]
	777f9f04a000-777f9f04c000 r--p 00000000 00:00 0 [vvar]
	777f9f04c000-777f9f04e000 r--p 00000000 00:00 0 [vvar_vclock]
	777f9f04e000-777f9f050000 r-xp 00000000 00:00 0 [vdso]
	777f9f050000-777f9f051000 r--p 00000000 00:21 48158204 /usr/lib/ld-linux-x86-64.so.2
	777f9f051000-777f9f07a000 r-xp 00001000 00:21 48158204 /usr/lib/ld-linux-x86-64.so.2
	777f9f07a000-777f9f085000 r--p 0002a000 00:21 48158204 /usr/lib/ld-linux-x86-64.so.2
	777f9f085000-777f9f087000 r--p 00034000 00:21 48158204 /usr/lib/ld-linux-x86-64.so.2
	777f9f087000-777f9f088000 rw-p 00036000 00:21 48158204 /usr/lib/ld-linux-x86-64.so.2
	777f9f088000-777f9f089000 rw-p 00000000 00:00 0
	7ffc7bfa7000-7ffc7bfc8000 rw-p 00000000 00:00 0 [stack]
	ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]

The anonymous memory areas are now labelled, so we can see which ones
were allocated by the Go runtime and which ones by glibc.
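For reference, the kernel API used here can also be exercised from
plain user code. The following is a minimal, illustrative sketch (not
part of this patch): it defines the two prctl constants locally,
assumes the golang.org/x/sys/unix package is available, and names a
freshly created anonymous mapping so that it shows up labelled in
/proc/self/maps:

	package main

	import (
		"fmt"
		"unsafe"

		"golang.org/x/sys/unix"
	)

	// Values from <linux/prctl.h>, defined locally for illustration.
	const (
		prSetVMA         = 0x53564d41 // PR_SET_VMA
		prSetVMAAnonName = 0          // PR_SET_VMA_ANON_NAME
	)

	func main() {
		// Create an anonymous mapping, as the runtime does for its heap.
		mem, err := unix.Mmap(-1, 0, 1<<20,
			unix.PROT_READ|unix.PROT_WRITE, unix.MAP_ANON|unix.MAP_PRIVATE)
		if err != nil {
			panic(err)
		}
		// Name it. The kernel copies the NUL-terminated string, so the
		// buffer does not need to outlive the call.
		name := []byte("demo mapping\x00")
		err = unix.Prctl(prSetVMA, prSetVMAAnonName,
			uintptr(unsafe.Pointer(&mem[0])), uintptr(len(mem)),
			uintptr(unsafe.Pointer(&name[0])))
		fmt.Println("prctl:", err) // region now shows [anon: demo mapping]
	}

On kernels older than 5.17, or built without CONFIG_ANON_VMA_NAME, the
prctl fails with EINVAL and the mapping simply stays unnamed.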
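The diffs below thread a vmaName string down to a new helper,
setVMAName, whose definition lives in the new file
src/runtime/set_vma_name_linux.go (listed in the diffstat but not shown
in this excerpt). As a hypothetical sketch only, built from the
SYS_PRCTL, PR_SET_VMA, and PR_SET_VMA_ANON_NAME constants added in this
patch (the real file differs), such a helper could look like:

	// Illustrative sketch; not the actual set_vma_name_linux.go.
	// setVMAName labels the anonymous mapping [v, v+n) in
	// /proc/self/maps. Best effort: on kernels without
	// CONFIG_ANON_VMA_NAME the prctl fails and the mapping is simply
	// left unnamed.
	func setVMAName(v unsafe.Pointer, n uintptr, name string) {
		// The kernel expects a NUL-terminated C string and copies it
		// out, so the buffer does not need to outlive the call. The
		// "Go: " prefix yields the [anon: Go: ...] labels shown above.
		// (A real in-runtime version must avoid allocating here.)
		buf := []byte("Go: " + name + "\x00")
		syscall.Syscall6(syscall.SYS_PRCTL, syscall.PR_SET_VMA,
			syscall.PR_SET_VMA_ANON_NAME,
			uintptr(v), n, uintptr(unsafe.Pointer(&buf[0])), 0)
	}

Here syscall refers to the internal/runtime/syscall package extended at
the top of this patch.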
Fixes #71546

Change-Id: I304e8b4dd7f2477a6da794fd44e9a7a5354e4bf4
Reviewed-on: https://go-review.googlesource.com/c/go/+/646095
Auto-Submit: Alan Donovan
Commit-Queue: Alan Donovan
Reviewed-by: Felix Geisendörfer
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Michael Knyszek
Reviewed-by: Dmitri Shuralyov
---
 src/internal/runtime/syscall/defs_linux.go    |  3 ++
 .../runtime/syscall/defs_linux_386.go         |  1 +
 .../runtime/syscall/defs_linux_amd64.go       |  1 +
 .../runtime/syscall/defs_linux_arm.go         |  1 +
 .../runtime/syscall/defs_linux_arm64.go       |  1 +
 .../runtime/syscall/defs_linux_loong64.go     |  1 +
 .../runtime/syscall/defs_linux_mips64x.go     |  1 +
 .../runtime/syscall/defs_linux_mipsx.go       |  1 +
 .../runtime/syscall/defs_linux_ppc64x.go      |  1 +
 .../runtime/syscall/defs_linux_riscv64.go     |  1 +
 .../runtime/syscall/defs_linux_s390x.go       |  1 +
 src/runtime/arena.go                          |  2 +-
 src/runtime/debuglog.go                       |  4 +--
 src/runtime/export_test.go                    |  2 +-
 src/runtime/heapdump.go                       |  2 +-
 src/runtime/malloc.go                         | 28 ++++++++--------
 src/runtime/mem.go                            | 12 +++----
 src/runtime/mem_aix.go                        |  6 ++--
 src/runtime/mem_bsd.go                        |  6 ++--
 src/runtime/mem_darwin.go                     |  6 ++--
 src/runtime/mem_linux.go                      | 16 +++++++---
 src/runtime/mem_sbrk.go                       |  6 ++--
 src/runtime/mem_windows.go                    |  6 ++--
 src/runtime/mheap.go                          |  8 ++---
 src/runtime/mpagealloc.go                     |  4 ++-
 src/runtime/mpagealloc_32bit.go               |  6 ++--
 src/runtime/mpagealloc_64bit.go               |  8 ++---
 src/runtime/mprof.go                          |  2 +-
 src/runtime/os_freebsd.go                     |  2 +-
 src/runtime/os_linux.go                       |  2 +-
 src/runtime/set_vma_name_linux.go             | 32 +++++++++++++++++++
 src/runtime/set_vma_name_stub.go              | 12 +++++++
 src/runtime/stack.go                          |  2 +-
 src/runtime/tracebuf.go                       |  2 +-
 src/runtime/traceregion.go                    |  2 +-
 src/runtime/vgetrandom_linux.go               |  1 +
 36 files changed, 130 insertions(+), 62 deletions(-)
 create mode 100644 src/runtime/set_vma_name_linux.go
 create mode 100644 src/runtime/set_vma_name_stub.go

diff --git a/src/internal/runtime/syscall/defs_linux.go b/src/internal/runtime/syscall/defs_linux.go
index b2e36a244f..70a1388a9e 100644
--- a/src/internal/runtime/syscall/defs_linux.go
+++ b/src/internal/runtime/syscall/defs_linux.go
@@ -16,4 +16,7 @@ const (
 	EPOLL_CTL_DEL = 0x2
 	EPOLL_CTL_MOD = 0x3
 	EFD_CLOEXEC   = 0x80000
+
+	PR_SET_VMA           = 0x53564d41
+	PR_SET_VMA_ANON_NAME = 0
 )
diff --git a/src/internal/runtime/syscall/defs_linux_386.go b/src/internal/runtime/syscall/defs_linux_386.go
index 68e687fb14..2cfedab7c6 100644
--- a/src/internal/runtime/syscall/defs_linux_386.go
+++ b/src/internal/runtime/syscall/defs_linux_386.go
@@ -7,6 +7,7 @@ package syscall
 const (
 	SYS_FCNTL         = 55
 	SYS_MPROTECT      = 125
+	SYS_PRCTL         = 172
 	SYS_EPOLL_CTL     = 255
 	SYS_EPOLL_PWAIT   = 319
 	SYS_EPOLL_CREATE1 = 329
diff --git a/src/internal/runtime/syscall/defs_linux_amd64.go b/src/internal/runtime/syscall/defs_linux_amd64.go
index ec480f5817..f664a59ad7 100644
--- a/src/internal/runtime/syscall/defs_linux_amd64.go
+++ b/src/internal/runtime/syscall/defs_linux_amd64.go
@@ -7,6 +7,7 @@ package syscall
 const (
 	SYS_MPROTECT      = 10
 	SYS_FCNTL         = 72
+	SYS_PRCTL         = 157
 	SYS_EPOLL_CTL     = 233
 	SYS_EPOLL_PWAIT   = 281
 	SYS_EPOLL_CREATE1 = 291
diff --git a/src/internal/runtime/syscall/defs_linux_arm.go b/src/internal/runtime/syscall/defs_linux_arm.go
index c5d1503012..2850199a65 100644
--- a/src/internal/runtime/syscall/defs_linux_arm.go
+++ b/src/internal/runtime/syscall/defs_linux_arm.go
@@ -7,6 +7,7 @@ package syscall
 const (
 	SYS_FCNTL         = 55
 	SYS_MPROTECT      = 125
+	SYS_PRCTL         = 172
 	SYS_EPOLL_CTL     = 251
 	SYS_EPOLL_PWAIT   = 346
 	SYS_EPOLL_CREATE1 = 357
diff --git a/src/internal/runtime/syscall/defs_linux_arm64.go b/src/internal/runtime/syscall/defs_linux_arm64.go
index f743fe31a5..1c951c1e7d 100644
--- a/src/internal/runtime/syscall/defs_linux_arm64.go
+++ b/src/internal/runtime/syscall/defs_linux_arm64.go
@@ -9,6 +9,7 @@ const (
 	SYS_EPOLL_CTL    = 21
 	SYS_EPOLL_PWAIT  = 22
 	SYS_FCNTL        = 25
+	SYS_PRCTL        = 167
 	SYS_MPROTECT     = 226
 	SYS_EPOLL_PWAIT2 = 441
 	SYS_EVENTFD2     = 19
diff --git a/src/internal/runtime/syscall/defs_linux_loong64.go b/src/internal/runtime/syscall/defs_linux_loong64.go
index 82218d1509..dfeee05737 100644
--- a/src/internal/runtime/syscall/defs_linux_loong64.go
+++ b/src/internal/runtime/syscall/defs_linux_loong64.go
@@ -9,6 +9,7 @@ const (
 	SYS_EPOLL_CTL    = 21
 	SYS_EPOLL_PWAIT  = 22
 	SYS_FCNTL        = 25
+	SYS_PRCTL        = 167
 	SYS_MPROTECT     = 226
 	SYS_EPOLL_PWAIT2 = 441
 	SYS_EVENTFD2     = 19
diff --git a/src/internal/runtime/syscall/defs_linux_mips64x.go b/src/internal/runtime/syscall/defs_linux_mips64x.go
index 4e0fd1f5d1..b9b8818d30 100644
--- a/src/internal/runtime/syscall/defs_linux_mips64x.go
+++ b/src/internal/runtime/syscall/defs_linux_mips64x.go
@@ -9,6 +9,7 @@ package syscall
 const (
 	SYS_MPROTECT      = 5010
 	SYS_FCNTL         = 5070
+	SYS_PRCTL         = 5153
 	SYS_EPOLL_CTL     = 5208
 	SYS_EPOLL_PWAIT   = 5272
 	SYS_EPOLL_CREATE1 = 5285
diff --git a/src/internal/runtime/syscall/defs_linux_mipsx.go b/src/internal/runtime/syscall/defs_linux_mipsx.go
index b87a355093..f147865f44 100644
--- a/src/internal/runtime/syscall/defs_linux_mipsx.go
+++ b/src/internal/runtime/syscall/defs_linux_mipsx.go
@@ -9,6 +9,7 @@ package syscall
 const (
 	SYS_FCNTL         = 4055
 	SYS_MPROTECT      = 4125
+	SYS_PRCTL         = 4192
 	SYS_EPOLL_CTL     = 4249
 	SYS_EPOLL_PWAIT   = 4313
 	SYS_EPOLL_CREATE1 = 4326
diff --git a/src/internal/runtime/syscall/defs_linux_ppc64x.go b/src/internal/runtime/syscall/defs_linux_ppc64x.go
index 8235edd795..81421089db 100644
--- a/src/internal/runtime/syscall/defs_linux_ppc64x.go
+++ b/src/internal/runtime/syscall/defs_linux_ppc64x.go
@@ -9,6 +9,7 @@ package syscall
 const (
 	SYS_FCNTL         = 55
 	SYS_MPROTECT      = 125
+	SYS_PRCTL         = 171
 	SYS_EPOLL_CTL     = 237
 	SYS_EPOLL_PWAIT   = 303
 	SYS_EPOLL_CREATE1 = 315
diff --git a/src/internal/runtime/syscall/defs_linux_riscv64.go b/src/internal/runtime/syscall/defs_linux_riscv64.go
index 82218d1509..dfeee05737 100644
--- a/src/internal/runtime/syscall/defs_linux_riscv64.go
+++ b/src/internal/runtime/syscall/defs_linux_riscv64.go
@@ -9,6 +9,7 @@ const (
 	SYS_EPOLL_CTL    = 21
 	SYS_EPOLL_PWAIT  = 22
 	SYS_FCNTL        = 25
+	SYS_PRCTL        = 167
 	SYS_MPROTECT     = 226
 	SYS_EPOLL_PWAIT2 = 441
 	SYS_EVENTFD2     = 19
diff --git a/src/internal/runtime/syscall/defs_linux_s390x.go b/src/internal/runtime/syscall/defs_linux_s390x.go
index 08073c01f0..1d9d5b2219 100644
--- a/src/internal/runtime/syscall/defs_linux_s390x.go
+++ b/src/internal/runtime/syscall/defs_linux_s390x.go
@@ -7,6 +7,7 @@ package syscall
 const (
 	SYS_FCNTL         = 55
 	SYS_MPROTECT      = 125
+	SYS_PRCTL         = 172
 	SYS_EPOLL_CTL     = 250
 	SYS_EPOLL_PWAIT   = 312
 	SYS_EPOLL_CREATE1 = 327
diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index 34821491d5..59b1bb3ba1 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -1041,7 +1041,7 @@ func (h *mheap) allocUserArenaChunk() *mspan {
 	//
 	// Unlike (*mheap).grow, just map in everything that we
 	// asked for. We're likely going to use it all.
-	sysMap(unsafe.Pointer(base), userArenaChunkBytes, &gcController.heapReleased)
+	sysMap(unsafe.Pointer(base), userArenaChunkBytes, &gcController.heapReleased, "user arena chunk")
 	sysUsed(unsafe.Pointer(base), userArenaChunkBytes, userArenaChunkBytes)
 
 	// Model the user arena as a heap span for a large object.
diff --git a/src/runtime/debuglog.go b/src/runtime/debuglog.go
index ad33ef8b06..b11e5e3fab 100644
--- a/src/runtime/debuglog.go
+++ b/src/runtime/debuglog.go
@@ -95,7 +95,7 @@ func dlogImpl() *dloggerImpl {
 	if l == nil {
 		// Use sysAllocOS instead of sysAlloc because we want to interfere
 		// with the runtime as little as possible, and sysAlloc updates accounting.
-		l = (*dloggerImpl)(sysAllocOS(unsafe.Sizeof(dloggerImpl{})))
+		l = (*dloggerImpl)(sysAllocOS(unsafe.Sizeof(dloggerImpl{}), "debug log"))
 		if l == nil {
 			throw("failed to allocate debug log")
 		}
@@ -774,7 +774,7 @@ func printDebugLogImpl() {
 	}
 	// Use sysAllocOS instead of sysAlloc because we want to interfere
 	// with the runtime as little as possible, and sysAlloc updates accounting.
-	state1 := sysAllocOS(unsafe.Sizeof(readState{}) * uintptr(n))
+	state1 := sysAllocOS(unsafe.Sizeof(readState{})*uintptr(n), "debug log")
 	if state1 == nil {
 		println("failed to allocate read state for", n, "logs")
 		printunlock()
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 79d83b3a3b..8da4ece881 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -536,7 +536,7 @@ func MapNextArenaHint() (start, end uintptr, ok bool) {
 	} else {
 		start, end = addr, addr+heapArenaBytes
 	}
-	got := sysReserve(unsafe.Pointer(addr), physPageSize)
+	got := sysReserve(unsafe.Pointer(addr), physPageSize, "")
 	ok = (addr == uintptr(got))
 	if !ok {
 		// We were unable to get the requested reservation.
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 8f2ae34f4d..6287cccd5f 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -727,7 +727,7 @@ func makeheapobjbv(p uintptr, size uintptr) bitvector {
 		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
 	}
 	n := nptr/8 + 1
-	p := sysAlloc(n, &memstats.other_sys)
+	p := sysAlloc(n, &memstats.other_sys, "heapdump")
 	if p == nil {
 		throw("heapdump: out of memory")
 	}
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 60ea2f5188..85f22bd28e 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -570,7 +570,7 @@ func mallocinit() {
 		// heap reservation.
 
 		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
-		meta := uintptr(sysReserve(nil, arenaMetaSize))
+		meta := uintptr(sysReserve(nil, arenaMetaSize, "heap reservation"))
 		if meta != 0 {
 			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
 		}
@@ -607,7 +607,7 @@ func mallocinit() {
 			128 << 20,
 		}
 		for _, arenaSize := range arenaSizes {
-			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
+			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes, "heap reservation")
 			if a != nil {
 				mheap_.arena.init(uintptr(a), size, false)
 				p = mheap_.arena.end // For hint below
@@ -657,7 +657,7 @@ func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx)
 		//
 		// Only do this if we're using the regular heap arena hints.
 		// This behavior is only for the heap.
-		v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased)
+		v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased, "heap")
 		if v != nil {
 			size = n
 			goto mapped
@@ -678,7 +678,7 @@ func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx)
 			// Outside addressable heap. Can't use.
 			v = nil
 		} else {
-			v = sysReserve(unsafe.Pointer(p), n)
+			v = sysReserve(unsafe.Pointer(p), n, "heap reservation")
 		}
 		if p == uintptr(v) {
 			// Success. Update the hint.
@@ -714,7 +714,7 @@ func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx)
 		// All of the hints failed, so we'll take any
 		// (sufficiently aligned) address the kernel will give
 		// us.
-		v, size = sysReserveAligned(nil, n, heapArenaBytes)
+		v, size = sysReserveAligned(nil, n, heapArenaBytes, "heap")
 		if v == nil {
 			return nil, 0
 		}
@@ -764,7 +764,7 @@ mapped:
 			// is paged in is too expensive. Trying to account for the whole region means
 			// that it will appear like an enormous memory overhead in statistics, even though
 			// it is not.
-			l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
+			l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2), "heap index"))
 			if l2 == nil {
 				throw("out of memory allocating heap arena map")
 			}
@@ -780,7 +780,7 @@ mapped:
 			throw("arena already initialized")
 		}
 		var r *heapArena
-		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
+		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys, "heap metadata"))
 		if r == nil {
 			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
 			if r == nil {
@@ -827,7 +827,7 @@ mapped:
 // sysReserveAligned is like sysReserve, but the returned pointer is
 // aligned to align bytes. It may reserve either n or n+align bytes,
 // so it returns the size that was reserved.
-func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
+func sysReserveAligned(v unsafe.Pointer, size, align uintptr, vmaName string) (unsafe.Pointer, uintptr) {
 	if isSbrkPlatform {
 		if v != nil {
 			throw("unexpected heap arena hint on sbrk platform")
@@ -839,7 +839,7 @@ func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, u
 	// for a larger region and remove the parts we don't need.
 	retries := 0
 retry:
-	p := uintptr(sysReserve(v, size+align))
+	p := uintptr(sysReserve(v, size+align, vmaName))
 	switch {
 	case p == 0:
 		return nil, 0
@@ -852,7 +852,7 @@ retry:
 		// so we may have to try again.
 		sysFreeOS(unsafe.Pointer(p), size+align)
 		p = alignUp(p, align)
-		p2 := sysReserve(unsafe.Pointer(p), size)
+		p2 := sysReserve(unsafe.Pointer(p), size, vmaName)
 		if p != uintptr(p2) {
 			// Must have raced. Try again.
 			sysFreeOS(p2, size)
@@ -1933,7 +1933,7 @@ func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
 	}
 
 	if size >= maxBlock {
-		return (*notInHeap)(sysAlloc(size, sysStat))
+		return (*notInHeap)(sysAlloc(size, sysStat, "immortal metadata"))
 	}
 
 	mp := acquirem()
@@ -1946,7 +1946,7 @@ func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
 	}
 	persistent.off = alignUp(persistent.off, align)
 	if persistent.off+size > persistentChunkSize || persistent.base == nil {
-		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
+		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys, "immortal metadata"))
 		if persistent.base == nil {
 			if persistent == &globalAlloc.persistentAlloc {
 				unlock(&globalAlloc.mutex)
@@ -2020,7 +2020,7 @@ func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
 	l.mapMemory = mapMemory
 }
 
-func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
+func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer {
 	p := alignUp(l.next, align)
 	if p+size > l.end {
 		return nil
@@ -2030,7 +2030,7 @@ func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Poi
 	if l.mapMemory {
 		// Transition from Reserved to Prepared to Ready.
 		n := pEnd - l.mapped
-		sysMap(unsafe.Pointer(l.mapped), n, sysStat)
+		sysMap(unsafe.Pointer(l.mapped), n, sysStat, vmaName)
 		sysUsed(unsafe.Pointer(l.mapped), n, n)
 	}
 	l.mapped = pEnd
diff --git a/src/runtime/mem.go b/src/runtime/mem.go
index 22688d51d5..6bb91b371a 100644
--- a/src/runtime/mem.go
+++ b/src/runtime/mem.go
@@ -46,10 +46,10 @@ import "unsafe"
 // which prevents us from allocating more stack.
 //
 //go:nosplit
-func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
+func sysAlloc(n uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer {
 	sysStat.add(int64(n))
 	gcController.mappedReady.Add(int64(n))
-	return sysAllocOS(n)
+	return sysAllocOS(n, vmaName)
 }
 
 // sysUnused transitions a memory region from Ready to Prepared. It notifies the
@@ -142,15 +142,15 @@ func sysFault(v unsafe.Pointer, n uintptr) {
 // NOTE: sysReserve returns OS-aligned memory, but the heap allocator
 // may use larger alignment, so the caller must be careful to realign the
 // memory obtained by sysReserve.
-func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
-	return sysReserveOS(v, n)
+func sysReserve(v unsafe.Pointer, n uintptr, vmaName string) unsafe.Pointer {
+	return sysReserveOS(v, n, vmaName)
 }
 
 // sysMap transitions a memory region from Reserved to Prepared. It ensures the
 // memory region can be efficiently transitioned to Ready.
 //
 // sysStat must be non-nil.
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat, vmaName string) {
 	sysStat.add(int64(n))
-	sysMapOS(v, n)
+	sysMapOS(v, n, vmaName)
 }
diff --git a/src/runtime/mem_aix.go b/src/runtime/mem_aix.go
index dff2756d97..c5e4710dac 100644
--- a/src/runtime/mem_aix.go
+++ b/src/runtime/mem_aix.go
@@ -12,7 +12,7 @@ import (
 // prevents us from allocating more stack.
 //
 //go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
 	p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		if err == _EACCES {
@@ -56,7 +56,7 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) {
 	mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
 }
 
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 	p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		return nil
@@ -64,7 +64,7 @@ func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 	return p
 }
 
-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 	// AIX does not allow mapping a range that is already mapped.
 	// So, call mprotect to change permissions.
 	// Note that sysMap is always called with a non-nil pointer
diff --git a/src/runtime/mem_bsd.go b/src/runtime/mem_bsd.go
index 78128aedf7..0c05b44c08 100644
--- a/src/runtime/mem_bsd.go
+++ b/src/runtime/mem_bsd.go
@@ -14,7 +14,7 @@ import (
 // which prevents us from allocating more stack.
 //
 //go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
 	v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		return nil
@@ -57,7 +57,7 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) {
 // Indicates not to reserve swap space for the mapping.
 const _sunosMAP_NORESERVE = 0x40
 
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 	flags := int32(_MAP_ANON | _MAP_PRIVATE)
 	if GOOS == "solaris" || GOOS == "illumos" {
 		// Be explicit that we don't want to reserve swap space
@@ -75,7 +75,7 @@ func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 const _sunosEAGAIN = 11
 const _ENOMEM = 12
 
-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 	p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 	if err == _ENOMEM || ((GOOS == "solaris" || GOOS == "illumos") && err == _sunosEAGAIN) {
 		throw("runtime: out of memory")
diff --git a/src/runtime/mem_darwin.go b/src/runtime/mem_darwin.go
index ae8487127c..9d4de51622 100644
--- a/src/runtime/mem_darwin.go
+++ b/src/runtime/mem_darwin.go
@@ -12,7 +12,7 @@ import (
 // which prevents us from allocating more stack.
 //
 //go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
 	v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		return nil
@@ -54,7 +54,7 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) {
 	mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
 }
 
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 	p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		return nil
@@ -64,7 +64,7 @@ func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 
 const _ENOMEM = 12
 
-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 	p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 	if err == _ENOMEM {
 		throw("runtime: out of memory")
diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go
index 9aaa57ac9e..24e006debc 100644
--- a/src/runtime/mem_linux.go
+++ b/src/runtime/mem_linux.go
@@ -18,7 +18,7 @@ const (
 // prevents us from allocating more stack.
 //
 //go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, vmaName string) unsafe.Pointer {
 	p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		if err == _EACCES {
@@ -31,6 +31,7 @@ func sysAllocOS(n uintptr) unsafe.Pointer {
 		}
 		return nil
 	}
+	setVMAName(p, n, vmaName)
 	return p
 }
 
@@ -70,7 +71,10 @@ func sysUnusedOS(v unsafe.Pointer, n uintptr) {
 			// Fall back on mmap if it's not supported.
 			// _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE will unmap all the
 			// pages in the old mapping, and remap the memory region.
-			mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
+			p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
+			if err == 0 && p != nil {
+				setVMAName(p, n, "unused")
+			}
 		}
 
 		if debug.harddecommit > 0 {
@@ -78,6 +82,7 @@ func sysUnusedOS(v unsafe.Pointer, n uintptr) {
 			if p != v || err != 0 {
 				throw("runtime: cannot disable permissions in address space")
 			}
+			setVMAName(p, n, "unused")
 		}
 	}
 
@@ -90,6 +95,7 @@ func sysUsedOS(v unsafe.Pointer, n uintptr) {
 			if p != v || err != 0 {
 				throw("runtime: cannot remap pages in address space")
 			}
+			setVMAName(p, n, "used")
 			return
 		}
 	}
@@ -154,15 +160,16 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) {
 	madvise(v, n, _MADV_DONTNEED)
 }
 
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, vmaName string) unsafe.Pointer {
 	p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		return nil
 	}
+	setVMAName(p, n, vmaName)
 	return p
 }
 
-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, vmaName string) {
 	p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 	if err == _ENOMEM {
 		throw("runtime: out of memory")
@@ -171,6 +178,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr) {
 		print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n")
 		throw("runtime: cannot map pages in arena address space")
 	}
+	setVMAName(p, n, vmaName)
 
 	// Disable huge pages if the GODEBUG for it is set.
 	//
diff --git a/src/runtime/mem_sbrk.go b/src/runtime/mem_sbrk.go
index cfca891086..9d6842ae52 100644
--- a/src/runtime/mem_sbrk.go
+++ b/src/runtime/mem_sbrk.go
@@ -157,7 +157,7 @@ func initBloc() {
 	blocMax = bloc
 }
 
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
 	lock(&memlock)
 	p := memAlloc(n)
 	memCheck()
@@ -195,13 +195,13 @@ func sysNoHugePageOS(v unsafe.Pointer, n uintptr) {
 func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) {
 }
 
-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 }
 
 func sysFaultOS(v unsafe.Pointer, n uintptr) {
 }
 
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 	lock(&memlock)
 	var p unsafe.Pointer
 	if uintptr(v) == bloc {
diff --git a/src/runtime/mem_windows.go b/src/runtime/mem_windows.go
index 477d898870..75860a4c1d 100644
--- a/src/runtime/mem_windows.go
+++ b/src/runtime/mem_windows.go
@@ -25,7 +25,7 @@ const (
 // which prevents us from allocating more stack.
 //
 //go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
+func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
 	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
 }
 
@@ -117,7 +117,7 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) {
 	sysUnusedOS(v, n)
 }
 
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 	// v is just a hint.
 	// First try at v.
 	// This will fail if any of [v, v+n) is already reserved.
@@ -130,5 +130,5 @@ func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
 }
 
-func sysMapOS(v unsafe.Pointer, n uintptr) {
+func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 }
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 28ca5c3a70..50ff68646f 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -548,7 +548,7 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
 		}
 		var new []*mspan
 		sp := (*slice)(unsafe.Pointer(&new))
-		sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys)
+		sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys, "allspans array")
 		if sp.array == nil {
 			throw("runtime: cannot allocate memory")
 		}
@@ -1527,7 +1527,7 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
 		// Transition this space from Reserved to Prepared and mark it
 		// as released since we'll be able to start using it after updating
 		// the page allocator and releasing the lock at any time.
-		sysMap(unsafe.Pointer(h.curArena.base), size, &gcController.heapReleased)
+		sysMap(unsafe.Pointer(h.curArena.base), size, &gcController.heapReleased, "heap")
 		// Update stats.
 		stats := memstats.heapStats.acquire()
 		atomic.Xaddint64(&stats.released, int64(size))
@@ -1558,7 +1558,7 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
 	// The allocation is always aligned to the heap arena
 	// size which is always > physPageSize, so its safe to
 	// just add directly to heapReleased.
-	sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased)
+	sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased, "heap")
 
 	// The memory just allocated counts as both released
 	// and idle, even though it's not yet backed by spans.
@@ -2658,7 +2658,7 @@ func newArenaMayUnlock() *gcBitsArena {
 	var result *gcBitsArena
 	if gcBitsArenas.free == nil {
 		unlock(&gcBitsArenas.lock)
-		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys))
+		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys, "gc bits"))
 		if result == nil {
 			throw("runtime: cannot allocate memory")
 		}
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 46d3ebacaf..c9491e31f4 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -81,6 +81,8 @@ const (
 	// there should this change.
 	pallocChunksL2Bits  = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
 	pallocChunksL1Shift = pallocChunksL2Bits
+
+	vmaNamePageAllocIndex = "page alloc index"
 )
 
 // maxSearchAddr returns the maximum searchAddr value, which indicates
@@ -401,7 +403,7 @@ func (p *pageAlloc) grow(base, size uintptr) {
 	if p.chunks[c.l1()] == nil {
 		// Create the necessary l2 entry.
 		const l2Size = unsafe.Sizeof(*p.chunks[0])
-		r := sysAlloc(l2Size, p.sysStat)
+		r := sysAlloc(l2Size, p.sysStat, vmaNamePageAllocIndex)
 		if r == nil {
 			throw("pageAlloc: out of memory")
 		}
diff --git a/src/runtime/mpagealloc_32bit.go b/src/runtime/mpagealloc_32bit.go
index 900146e363..4e99be1c2a 100644
--- a/src/runtime/mpagealloc_32bit.go
+++ b/src/runtime/mpagealloc_32bit.go
@@ -71,12 +71,12 @@ func (p *pageAlloc) sysInit(test bool) {
 	totalSize = alignUp(totalSize, physPageSize)
 
 	// Reserve memory for all levels in one go. There shouldn't be much for 32-bit.
-	reservation := sysReserve(nil, totalSize)
+	reservation := sysReserve(nil, totalSize, "page summary")
 	if reservation == nil {
 		throw("failed to reserve page summary memory")
 	}
 
 	// There isn't much. Just map it and mark it as used immediately.
-	sysMap(reservation, totalSize, p.sysStat)
+	sysMap(reservation, totalSize, p.sysStat, "page summary")
 	sysUsed(reservation, totalSize, totalSize)
 	p.summaryMappedReady += totalSize
 
@@ -123,7 +123,7 @@ func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) (mappedReady uin
 	if test {
 		// Set up the scavenge index via sysAlloc so the test can free it later.
 		scavIndexSize := uintptr(len(scavengeIndexArray)) * unsafe.Sizeof(atomicScavChunkData{})
-		s.chunks = ((*[(1 << heapAddrBits) / pallocChunkBytes]atomicScavChunkData)(sysAlloc(scavIndexSize, sysStat)))[:]
+		s.chunks = ((*[(1 << heapAddrBits) / pallocChunkBytes]atomicScavChunkData)(sysAlloc(scavIndexSize, sysStat, vmaNamePageAllocIndex)))[:]
 		mappedReady = scavIndexSize
 	} else {
 		// Set up the scavenge index.
diff --git a/src/runtime/mpagealloc_64bit.go b/src/runtime/mpagealloc_64bit.go
index 36cd222360..eb425f0704 100644
--- a/src/runtime/mpagealloc_64bit.go
+++ b/src/runtime/mpagealloc_64bit.go
@@ -76,7 +76,7 @@ func (p *pageAlloc) sysInit(test bool) {
 
 	// Reserve b bytes of memory anywhere in the address space.
 	b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
-	r := sysReserve(nil, b)
+	r := sysReserve(nil, b, "page summary")
 	if r == nil {
 		throw("failed to reserve page summary memory")
 	}
@@ -176,7 +176,7 @@ func (p *pageAlloc) sysGrow(base, limit uintptr) {
 	}
 
 	// Map and commit need.
-	sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
+	sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat, "page alloc")
 	sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
 	p.summaryMappedReady += need.size()
 }
@@ -229,7 +229,7 @@ func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintpt
 
 	// If we've got something to map, map it, and update the slice bounds.
 	if need.size() != 0 {
-		sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat)
+		sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat, "scavenge index")
 		sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
 		// Update the indices only after the new memory is valid.
 		if haveMax == 0 || needMin < haveMin {
@@ -248,7 +248,7 @@ func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintpt
 func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) uintptr {
 	n := uintptr(1<