From: Michael Anthony Knyszek
Date: Thu, 4 Oct 2018 15:33:08 +0000 (+0000)
Subject: runtime: scavenge large spans before heap growth
X-Git-Tag: go1.12beta1~581
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=c803ffc67d0c90b24eb5a60a7d573eecc39e7753;p=gostls13.git

runtime: scavenge large spans before heap growth

This change scavenges the largest spans before growing the heap,
releasing physical pages to "make up" for the newly-mapped space
which, presumably, will be touched.

In theory, this approach to scavenging helps reduce the RSS of an
application by marking fragments in memory as reclaimable to the OS
more eagerly than before. In practice, this may not necessarily be
true, depending on how sysUnused is implemented for each platform.

Fixes #14045.

Change-Id: Iab60790be05935865fc71f793cb9323ab00a18bd
Reviewed-on: https://go-review.googlesource.com/c/139719
Run-TryBot: Michael Knyszek
TryBot-Result: Gobot Gobot
Reviewed-by: Austin Clements
---

diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index ddbc872080..8f6db8eec5 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -945,6 +945,14 @@ func (h *mheap) grow(npage uintptr) bool {
 		return false
 	}
 
+	// Scavenge some pages out of the free treap to make up for
+	// the virtual memory space we just allocated. We prefer to
+	// scavenge the largest spans first since the cost of scavenging
+	// is proportional to the number of sysUnused() calls rather than
+	// the number of pages released, so we make fewer of those calls
+	// with larger spans.
+	h.scavengeLargest(size)
+
 	// Create a fake "in use" span and free it, so that the
 	// right coalescing happens.
 	s := (*mspan)(h.spanalloc.alloc())
@@ -1107,6 +1115,46 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	}
 }
 
+// scavengeLargest scavenges nbytes worth of spans in the free treap,
+// starting from the largest span and working down. It then takes those
+// spans and places them in the scav treap. h must be locked.
+func (h *mheap) scavengeLargest(nbytes uintptr) {
+	// Find the largest child.
+	t := h.free.treap
+	if t == nil {
+		return
+	}
+	for t.right != nil {
+		t = t.right
+	}
+	// Iterate over the treap from the largest child to the smallest by
+	// starting from the largest and finding its predecessor until we've
+	// recovered nbytes worth of physical memory, or it no longer has a
+	// predecessor (meaning the treap is now empty).
+	released := uintptr(0)
+	for t != nil && released < nbytes {
+		s := t.spanKey
+		r := s.scavenge()
+		if r == 0 {
+			// Since we're going in order of largest-to-smallest span, this
+			// means all other spans are no bigger than s. There's a high
+			// chance that the other spans don't even cover a full page
+			// (though they could), but iterating further just for a handful
+			// of pages probably isn't worth it, so just stop here.
+			//
+			// This check also preserves the invariant that spans that have
+			// `scavenged` set are only ever in the `scav` treap, and
+			// those which have it unset are only in the `free` treap.
+			return
+		}
+		prev := t.pred()
+		h.free.removeNode(t)
+		t = prev
+		h.scav.insert(s)
+		released += r
+	}
+}
+
 // scavengeAll visits each node in the unscav treap and scavenges the
 // treapNode's span. It then removes the scavenged span from
 // unscav and adds it into scav before continuing. h must be locked.
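
As an aside for readers outside the runtime: the standalone sketch below
illustrates the largest-first policy the commit describes. It is a
simplification under assumed names, not the runtime's code: span,
physPageSize, and the sorted-slice stand-in for the treap are all
hypothetical (the real implementation walks the size-ordered h.free
treap via pred() and moves scavenged spans into h.scav). It shows why
releasing a few large spans costs fewer sysUnused-style calls per byte
than releasing many small ones, and why a span too small to cover a
whole physical page releases nothing.

// Standalone sketch of largest-first scavenging. Toy code, not
// src/runtime/mheap.go: all names here are illustrative stand-ins.
package main

import (
	"fmt"
	"sort"
)

const physPageSize = 4096 // assumed physical page size

// span is a toy stand-in for the runtime's mspan: a contiguous range
// [base, base+npages*8192) of free heap memory.
type span struct {
	base   uintptr
	npages uintptr // 8 KiB runtime pages
}

func (s *span) size() uintptr { return s.npages * 8192 }

// scavenge models mspan.scavenge: only whole physical pages can be
// returned to the OS, so round the span's bounds inward to physical
// page boundaries and report how many bytes could be reclaimed.
func (s *span) scavenge() uintptr {
	start := (s.base + physPageSize - 1) &^ (physPageSize - 1) // round up
	end := (s.base + s.size()) &^ (physPageSize - 1)           // round down
	if end <= start {
		return 0 // too small to cover even one physical page
	}
	// The runtime would issue sysUnused(start, end-start) here:
	// one call per span, regardless of how many pages it spans.
	return end - start
}

// scavengeLargest releases roughly nbytes by scavenging the largest
// spans first. The runtime walks a size-ordered treap from its maximum
// via pred(); a sorted slice gives the same visit order here.
func scavengeLargest(free []*span, nbytes uintptr) (released uintptr, calls int) {
	sort.Slice(free, func(i, j int) bool { return free[i].size() > free[j].size() })
	for _, s := range free {
		if released >= nbytes {
			break
		}
		r := s.scavenge()
		if r == 0 {
			// Every remaining span is no bigger than s, so none of
			// them can release a whole physical page either: stop.
			break
		}
		calls++
		released += r
	}
	return
}

func main() {
	free := []*span{
		{base: 0x10000, npages: 64}, // 512 KiB
		{base: 0x90000, npages: 4},  // 32 KiB
		{base: 0xa0000, npages: 1},  // 8 KiB
	}
	released, calls := scavengeLargest(free, 256<<10)
	fmt.Printf("released %d bytes in %d sysUnused-like calls\n", released, calls)
}

Note how the early exit on r == 0 mirrors the runtime's: once the
largest remaining span cannot release a whole physical page, no smaller
span can, so further iteration buys nothing.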