From: Lance Yang
Date: Tue, 28 Oct 2025 08:02:13 +0000 (+0000)
Subject: runtime: avoid zeroing scavenged memory
X-Git-Tag: go1.26rc1~410
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=27937289dc9fccf1f5513475145799087f39b964;p=gostls13.git

runtime: avoid zeroing scavenged memory

On Linux, memory returned to the kernel via MADV_DONTNEED is guaranteed
to be zero-filled on its next use. This commit leverages that kernel
behavior to avoid a redundant software zeroing pass in the runtime,
improving performance.

Change-Id: Ia14343b447a2cec7af87644fe8050e23e983c787
GitHub-Last-Rev: 6c8df322836e70922c69ca3c5aac36e4b8a0839a
GitHub-Pull-Request: golang/go#76063
Reviewed-on: https://go-review.googlesource.com/c/go/+/715160
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Michael Knyszek
Reviewed-by: David Chase
---

diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index 52a2a99d6c..2095bfa8e0 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -1051,7 +1051,11 @@ func (h *mheap) allocUserArenaChunk() *mspan {
 
 	// Model the user arena as a heap span for a large object.
 	spc := makeSpanClass(0, false)
-	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
+	// A user arena chunk is always fresh from the OS. It's either newly allocated
+	// via sysAlloc() or reused from the readyList after a sysFault(). The memory is
+	// then re-mapped via sysMap(), so we can safely treat it as scavenged; the
+	// kernel guarantees it will be zero-filled on its next use.
+	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages, userArenaChunkBytes)
 	s.isUserArenaChunk = true
 	s.elemsize -= userArenaChunkReserveBytes()
 	s.freeindex = 1
diff --git a/src/runtime/mem.go b/src/runtime/mem.go
index cd06ea323d..f0b00c7715 100644
--- a/src/runtime/mem.go
+++ b/src/runtime/mem.go
@@ -70,6 +70,12 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 	sysUnusedOS(v, n)
 }
 
+// needZeroAfterSysUnused reports whether memory returned by sysUnused must be
+// zeroed for use.
+func needZeroAfterSysUnused() bool {
+	return needZeroAfterSysUnusedOS()
+}
+
 // sysUsed transitions a memory region from Prepared to Ready. It notifies the
 // operating system that the memory region is needed and ensures that the region
 // may be safely accessed. This is typically a no-op on systems that don't have
diff --git a/src/runtime/mem_aix.go b/src/runtime/mem_aix.go
index c5e4710dac..1203af5797 100644
--- a/src/runtime/mem_aix.go
+++ b/src/runtime/mem_aix.go
@@ -79,3 +79,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 		throw("runtime: cannot map pages in arena address space")
 	}
 }
+
+func needZeroAfterSysUnusedOS() bool {
+	return true
+}
diff --git a/src/runtime/mem_bsd.go b/src/runtime/mem_bsd.go
index 0c05b44c08..70375615da 100644
--- a/src/runtime/mem_bsd.go
+++ b/src/runtime/mem_bsd.go
@@ -85,3 +85,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 		throw("runtime: cannot map pages in arena address space")
 	}
 }
+
+func needZeroAfterSysUnusedOS() bool {
+	return true
+}
diff --git a/src/runtime/mem_darwin.go b/src/runtime/mem_darwin.go
index 9d4de51622..100512f5cd 100644
--- a/src/runtime/mem_darwin.go
+++ b/src/runtime/mem_darwin.go
@@ -74,3 +74,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 		throw("runtime: cannot map pages in arena address space")
 	}
 }
+
+func needZeroAfterSysUnusedOS() bool {
+	return true
+}
diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go
index 24e006debc..ce25537611 100644
--- a/src/runtime/mem_linux.go
+++ b/src/runtime/mem_linux.go
@@ -188,3 +188,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, vmaName string) {
 		sysNoHugePageOS(v, n)
 	}
 }
+
+func needZeroAfterSysUnusedOS() bool {
+	return debug.madvdontneed == 0
+}
diff --git a/src/runtime/mem_sbrk.go b/src/runtime/mem_sbrk.go
index 5284bbd000..9e752df2c3 100644
--- a/src/runtime/mem_sbrk.go
+++ b/src/runtime/mem_sbrk.go
@@ -296,3 +296,7 @@ func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) {
 	})
 	return unsafe.Pointer(p), size
 }
+
+func needZeroAfterSysUnusedOS() bool {
+	return true
+}
diff --git a/src/runtime/mem_windows.go b/src/runtime/mem_windows.go
index 3db6fc2ba4..afc2dee19f 100644
--- a/src/runtime/mem_windows.go
+++ b/src/runtime/mem_windows.go
@@ -132,3 +132,7 @@ func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 
 func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 }
+
+func needZeroAfterSysUnusedOS() bool {
+	return true
+}
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index f2dc3717b1..711c7790eb 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1394,7 +1394,7 @@ HaveSpan:
 	}
 
 	// Initialize the span.
-	h.initSpan(s, typ, spanclass, base, npages)
+	h.initSpan(s, typ, spanclass, base, npages, scav)
 
 	if valgrindenabled {
 		valgrindMempoolMalloc(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base), npages*pageSize)
@@ -1440,11 +1440,17 @@
 
 // initSpan initializes a blank span s which will represent the range
 // [base, base+npages*pageSize). typ is the type of span being allocated.
-func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages uintptr) {
+func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages, scav uintptr) {
 	// At this point, both s != nil and base != 0, and the heap
 	// lock is no longer held. Initialize the span.
 	s.init(base, npages)
-	if h.allocNeedsZero(base, npages) {
+	// Always call allocNeedsZero to update the arena's zeroedBase watermark
+	// and determine if the memory is considered dirty.
+	needZero := h.allocNeedsZero(base, npages)
+	// If these pages were scavenged (returned to the OS), the kernel guarantees
+	// they will be zero-filled on next use (fault-in), so we can treat them as
+	// already zeroed and skip explicit clearing.
+	if (needZeroAfterSysUnused() || scav != npages*pageSize) && needZero {
 		s.needzero = 1
 	}
 	nbytes := npages * pageSize
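
Note (illustration only, not part of the patch): the change relies on the Linux
guarantee that a private anonymous mapping released with madvise(MADV_DONTNEED)
reads back as zero on its next access. The new hook encodes this: on Linux,
needZeroAfterSysUnusedOS reports debug.madvdontneed == 0, so explicit zeroing is
only skipped when the runtime releases memory with MADV_DONTNEED rather than
MADV_FREE, which does not carry the same zero-fill guarantee. The standalone
sketch below (assuming a Linux host; it uses only the syscall package and no
runtime internals) demonstrates the kernel behavior directly.

//go:build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Map one anonymous, private page; the kernel hands it out zero-filled.
	page, err := syscall.Mmap(-1, 0, syscall.Getpagesize(),
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_ANON|syscall.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(page)

	page[0] = 0xAB // dirty the page

	// Return the contents to the kernel. For a private anonymous mapping,
	// MADV_DONTNEED drops the backing pages entirely.
	if err := syscall.Madvise(page, syscall.MADV_DONTNEED); err != nil {
		panic(err)
	}

	// The next access faults in a fresh zero page, so no software memclr is
	// needed before reuse.
	fmt.Printf("after MADV_DONTNEED: %#x\n", page[0]) // prints 0x0
}

The same reasoning explains the guard in initSpan above: zeroing is skipped only
when the entire span was scavenged (scav == npages*pageSize), since a partially
scavenged span may still contain dirty, non-zero pages.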