// Model the user arena as a heap span for a large object.
spc := makeSpanClass(0, false)
- h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
+ // A user arena chunk is always fresh from the OS. It's either newly allocated
+ // via sysAlloc() or reused from the readyList after a sysFault(). The memory is
+ // then re-mapped via sysMap(), so we can safely treat it as scavenged; the
+ // kernel guarantees it will be zero-filled on its next use.
+ h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages, userArenaChunkBytes)
s.isUserArenaChunk = true
s.elemsize -= userArenaChunkReserveBytes()
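+ // The chunk is modeled as a single large object that is already allocated:
+ // the span holds one element, so freeindex = 1 marks it as fully in use.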
s.freeindex = 1
sysUnusedOS(v, n)
}
+// needZeroAfterSysUnused reports whether memory that has been returned to the
+// OS with sysUnused must still be zeroed by the runtime before it is used
+// again, i.e. whether the OS does not guarantee zero-filled pages on reuse.
+func needZeroAfterSysUnused() bool {
+ return needZeroAfterSysUnusedOS()
+}
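A rough sketch of one way the per-OS hook might look on Linux (an assumption for illustration, not part of this change): sysUnusedOS there prefers MADV_FREE, which may leave the old page contents visible until the kernel reclaims the pages, and falls back to MADV_DONTNEED, which does guarantee zero-filled pages on the next fault of an anonymous private mapping. Keying off the adviseUnused variable in mem_linux.go, it could be written as:

func needZeroAfterSysUnusedOS() bool {
	// adviseUnused holds the advice sysUnusedOS currently uses: MADV_FREE when
	// the kernel supports it, MADV_DONTNEED otherwise. Only MADV_DONTNEED
	// guarantees zero pages on the next fault, so MADV_FREE means zeroing is
	// still required.
	return atomic.Load(&adviseUnused) == _MADV_FREE
}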
+
// sysUsed transitions a memory region from Prepared to Ready. It notifies the
// operating system that the memory region is needed and ensures that the region
// may be safely accessed. This is typically a no-op on systems that don't have
}
// Initialize the span.
- h.initSpan(s, typ, spanclass, base, npages)
+ h.initSpan(s, typ, spanclass, base, npages, scav)
if valgrindenabled {
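+ // Register this span's memory with Valgrind as an allocation carved out of
+ // its heap arena's memory pool.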
valgrindMempoolMalloc(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base), npages*pageSize)
// initSpan initializes a blank span s which will represent the range
// [base, base+npages*pageSize). typ is the type of span being allocated.
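+// scav is the number of bytes in the range that had been scavenged (returned
+// to the OS) before being allocated to this span.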
-func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages uintptr) {
+func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages, scav uintptr) {
// At this point, both s != nil and base != 0, and the heap
// lock is no longer held. Initialize the span.
s.init(base, npages)
- if h.allocNeedsZero(base, npages) {
+ // Always call allocNeedsZero to update the arena's zeroedBase watermark
+ // and determine if the memory is considered dirty.
+ needZero := h.allocNeedsZero(base, npages)
+ // If every page in the range was scavenged (returned to the OS) and the OS
+ // guarantees that such memory is zero-filled when next faulted in, we can
+ // treat the memory as already zeroed and leave needzero unset.
+ if (needZeroAfterSysUnused() || scav != npages*pageSize) && needZero {
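+ // The memory may still contain stale data; record that it must be cleared
+ // before it is handed out.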
s.needzero = 1
}
nbytes := npages * pageSize