// heap_released since we already did so earlier.
sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
s.scavenged = false
-
- // Since we allocated out of a scavenged span, we just
- // grew the RSS. Mitigate this by scavenging enough free
- // space to make up for it but only if we need to.
- //
- // scavengeLocked may cause coalescing, so prevent
- // coalescing with s by temporarily changing its state.
- s.state = mSpanManual
- h.scavengeIfNeededLocked(s.npages * pageSize)
- s.state = mSpanFree
}
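// Record s in the span map so all npage pages look up to it.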
h.setSpans(s.base(), npage, s)

// growAddSpan adds a free span when the heap grows into [v, v+size).
//
// h must be locked.
func (h *mheap) growAddSpan(v unsafe.Pointer, size uintptr) {
+ // Scavenge some pages to make up for the virtual memory space
+ // we just allocated, but only if we need to.
+ h.scavengeIfNeededLocked(size)
+
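+ // Allocate a span object from the heap's fixed-size allocator and
+ // initialize it to cover the newly grown region.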
s := (*mspan)(h.spanalloc.alloc())
s.init(uintptr(v), size/pageSize)
h.setSpans(s.base(), s.npages, s)
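
// The scavengeIfNeededLocked helper is not shown in this diff. As a minimal
// sketch of the behavior the comments describe (scavenge enough free space
// to make up for new memory, but only if we need to), assuming the runtime's
// heapRetained helper and a hypothetical goal field h.scavengeGoal:
//
//   func (h *mheap) scavengeIfNeededLocked(size uintptr) {
//       // Scavenge only if retaining size more bytes would put the heap
//       // over its goal, and then release just the overage (at most size),
//       // delegating the actual work to scavengeLocked.
//       if r := heapRetained(); r+uint64(size) > h.scavengeGoal {
//           todo := uint64(size)
//           if overage := r + uint64(size) - h.scavengeGoal; todo > overage {
//               todo = overage
//           }
//           h.scavengeLocked(uintptr(todo))
//       }
//   }
//
// Calling it here, at heap-growth time, also sidesteps the old code's
// coalescing hazard: there is no free span s to protect, so no temporary
// state change is needed.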