From: Michael Anthony Knyszek
Date: Thu, 8 Nov 2018 20:06:58 +0000 (+0000)
Subject: runtime: allocate from free and scav fairly
X-Git-Tag: go1.12beta1~19
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=064842450bd904d2519d723a0109b33007ad00e9;p=gostls13.git

runtime: allocate from free and scav fairly

This change modifies the behavior of span allocations to no longer
prefer the free treap over the scavenged treap. While there is an
additional cost to allocating out of the scavenged treap, the current
behavior of preferring unscavenged spans can lead to unbounded growth
of a program's virtual memory footprint.

In small programs (low # of Ps, low resident set size, low allocation
rate) this behavior isn't really apparent and is difficult to
reproduce. However, in relatively large, long-running programs we see
this unbounded growth in free spans and an unbounded number of heap
growths. It remains unclear exactly how preferring unscavenged spans
ends up increasing the number of heap growths over time, but switching
the policy back to best-fit across both treaps does solve the problem.

Change-Id: Ibb88d24f9ef6766baaa7f12b411974cc03341e7b
Reviewed-on: https://go-review.googlesource.com/c/148979
Run-TryBot: Michael Knyszek
TryBot-Result: Gobot Gobot
Reviewed-by: Rick Hudson
Reviewed-by: Austin Clements
---

diff --git a/src/runtime/mgclarge.go b/src/runtime/mgclarge.go
index 663a37e3ed..2a04d4a793 100644
--- a/src/runtime/mgclarge.go
+++ b/src/runtime/mgclarge.go
@@ -297,14 +297,13 @@ func (root *mTreap) removeNode(t *treapNode) {
 	mheap_.treapalloc.free(unsafe.Pointer(t))
 }
 
-// remove searches for, finds, removes from the treap, and returns the smallest
-// span that can hold npages. If no span has at least npages return nil.
+// find searches for, finds, and returns the treap node containing the
+// smallest span that can hold npages. If no span has at least npages
+// it returns nil.
 // This is slightly more complicated than a simple binary tree search
 // since if an exact match is not found the next larger node is
 // returned.
-// If the last node inspected > npagesKey not holding
-// a left node (a smaller npages) is the "best fit" node.
-func (root *mTreap) remove(npages uintptr) *mspan {
+func (root *mTreap) find(npages uintptr) *treapNode {
 	t := root.treap
 	for t != nil {
 		if t.spanKey == nil {
@@ -315,9 +314,7 @@ func (root *mTreap) remove(npages uintptr) *mspan {
 		} else if t.left != nil && t.left.npagesKey >= npages {
 			t = t.left
 		} else {
-			result := t.spanKey
-			root.removeNode(t)
-			return result
+			return t
 		}
 	}
 	return nil
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index b5e0b0f306..9d7d683cd1 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -998,19 +998,34 @@ func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
 	}
 }
 
+// pickFreeSpan acquires a free span from internal free list
+// structures if one is available. Otherwise returns nil.
+// h must be locked.
+func (h *mheap) pickFreeSpan(npage uintptr) *mspan {
+	tf := h.free.find(npage)
+	ts := h.scav.find(npage)
+
+	// Check for whichever treap gave us the smaller, non-nil result.
+	// Note that we want the _smaller_ free span, i.e. the free span
+	// closer in size to the amount we requested (npage).
+	var s *mspan
+	if tf != nil && (ts == nil || tf.spanKey.npages <= ts.spanKey.npages) {
+		s = tf.spanKey
+		h.free.removeNode(tf)
+	} else if ts != nil && (tf == nil || tf.spanKey.npages > ts.spanKey.npages) {
+		s = ts.spanKey
+		h.scav.removeNode(ts)
+	}
+	return s
+}
+
 // Allocates a span of the given size.
 // h must be locked.
 // The returned span has been removed from the
 // free structures, but its state is still mSpanFree.
 func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
 	var s *mspan
-	// First, attempt to allocate from free spans, then from
-	// scavenged spans, looking for best fit in each.
-	s = h.free.remove(npage)
-	if s != nil {
-		goto HaveSpan
-	}
-	s = h.scav.remove(npage)
+	s = h.pickFreeSpan(npage)
 	if s != nil {
 		goto HaveSpan
 	}
@@ -1018,23 +1033,19 @@ func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
 	if !h.grow(npage) {
 		return nil
 	}
-	s = h.free.remove(npage)
+	s = h.pickFreeSpan(npage)
 	if s != nil {
 		goto HaveSpan
 	}
-	s = h.scav.remove(npage)
-	if s != nil {
-		goto HaveSpan
-	}
-	return nil
+	throw("grew heap, but no adequate free span found")
 
 HaveSpan:
 	// Mark span in use.
 	if s.state != mSpanFree {
-		throw("mheap.allocLocked - mspan not free")
+		throw("candidate mspan for allocation is not free")
 	}
 	if s.npages < npage {
-		throw("mheap.allocLocked - bad npages")
+		throw("candidate mspan for allocation is too small")
 	}
 
 	// First, subtract any memory that was released back to
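
The policy at the heart of this change is easiest to see in isolation. What follows is a minimal, self-contained sketch of the same best-fit selection across two pools; the span, pool, find, and pickFreeSpan names here are hypothetical stand-ins for the runtime's mspan and mTreap, not the runtime code itself. Each pool reports its smallest span that can satisfy the request, and the allocator takes whichever of the two candidates is smaller, rather than always draining the unscavenged pool first.

package main

import "fmt"

// span is a simplified stand-in for the runtime's mspan: a size in
// pages plus a flag recording which pool it came from.
type span struct {
	npages    uintptr
	scavenged bool
}

// pool is a simplified stand-in for an mTreap. find returns the
// smallest span holding at least npages, or nil if none qualifies.
type pool []span

func (p pool) find(npages uintptr) *span {
	var best *span
	for i := range p {
		if p[i].npages >= npages && (best == nil || p[i].npages < best.npages) {
			best = &p[i]
		}
	}
	return best
}

// pickFreeSpan mirrors the selection logic this CL introduces: consult
// both pools and take the smaller adequate candidate, breaking ties in
// favor of the unscavenged (cheaper to use) pool, matching the <= in
// mheap.pickFreeSpan above.
func pickFreeSpan(free, scav pool, npages uintptr) *span {
	tf := free.find(npages)
	ts := scav.find(npages)
	if tf != nil && (ts == nil || tf.npages <= ts.npages) {
		return tf
	}
	return ts // may be nil if neither pool can satisfy the request
}

func main() {
	free := pool{{npages: 64}, {npages: 9}}
	scav := pool{{npages: 4, scavenged: true}, {npages: 8, scavenged: true}}

	// The old policy would satisfy a 3-page request from the 9-page
	// unscavenged span even though a tighter 4-page scavenged span
	// exists; the fair policy picks the 4-page span.
	s := pickFreeSpan(free, scav, 3)
	fmt.Printf("picked %d pages (scavenged=%v)\n", s.npages, s.scavenged)
}

Run against these inputs the sketch prints "picked 4 pages (scavenged=true)". Under the old preference it would have taken the 9-page unscavenged span, the pattern the commit message describes as leading to unbounded growth of the virtual memory footprint.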