From 06be7cbf3c27168172f1f89dd4f55cb07a37ec38 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek <mknyszek@google.com>
Date: Tue, 6 Nov 2018 00:10:33 +0000
Subject: [PATCH] runtime: stop unnecessary span scavenges on free

This change fixes a bug wherein freeing a scavenged span that didn't
coalesce with any neighboring spans would result in that span getting
scavenged again. This case may actually be a common occurrence, because
"freeing" span trimmings and newly-grown spans ends up using the same
codepath. On systems where madvise is relatively expensive, this can
have a large performance impact.

This change also cleans up some of this logic in freeSpanLocked, since
a number of factors made the coalescing code somewhat difficult to
reason about with respect to scavenging. Notably, the way the
needsScavenge boolean is handled could be better expressed, and the
inverted conditions (e.g. !after.scavenged) can make things even more
confusing.

Fixes #28595.

Change-Id: I75228dba70b6596b90853020b7c24fbe7ab937cf
Reviewed-on: https://go-review.googlesource.com/c/147559
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
---
 src/runtime/mheap.go | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 43f59adb8a..9b121c63a1 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1054,7 +1054,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 
 	// We scavenge s at the end after coalescing if s or anything
 	// it merged with is marked scavenged.
-	needsScavenge := s.scavenged
+	needsScavenge := false
 	prescavenged := s.released() // number of bytes already scavenged.
 
 	// Coalesce with earlier, later spans.
@@ -1064,14 +1064,15 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		s.npages += before.npages
 		s.needzero |= before.needzero
 		h.setSpan(before.base(), s)
+		// If before or s are scavenged, then we need to scavenge the final coalesced span.
+		needsScavenge = needsScavenge || before.scavenged || s.scavenged
+		prescavenged += before.released()
 		// The size is potentially changing so the treap needs to delete adjacent nodes and
 		// insert back as a combined node.
-		if !before.scavenged {
-			h.free.removeSpan(before)
-		} else {
+		if before.scavenged {
 			h.scav.removeSpan(before)
-			needsScavenge = true
-			prescavenged += before.released()
+		} else {
+			h.free.removeSpan(before)
 		}
 		before.state = mSpanDead
 		h.spanalloc.free(unsafe.Pointer(before))
@@ -1082,12 +1083,12 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		s.npages += after.npages
 		s.needzero |= after.needzero
 		h.setSpan(s.base()+s.npages*pageSize-1, s)
-		if !after.scavenged {
-			h.free.removeSpan(after)
-		} else {
+		needsScavenge = needsScavenge || after.scavenged || s.scavenged
+		prescavenged += after.released()
+		if after.scavenged {
 			h.scav.removeSpan(after)
-			needsScavenge = true
-			prescavenged += after.released()
+		} else {
+			h.free.removeSpan(after)
 		}
 		after.state = mSpanDead
 		h.spanalloc.free(unsafe.Pointer(after))
-- 
2.48.1
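
To make the bug concrete: with needsScavenge seeded from s.scavenged, an
already-scavenged span that finds no free neighbors still takes the scavenge
path on free, re-issuing madvise over memory that was already returned to the
OS. The following is a minimal, self-contained sketch of that behavior, not
the runtime code itself: the span type, the helper names, and the counter
standing in for the madvise call are all illustrative assumptions.

package main

import "fmt"

// span is a hypothetical stand-in for the runtime's mspan; it tracks only
// whether its pages have already been returned to the OS.
type span struct {
	scavenged bool
}

// freeOld models the pre-patch logic: needsScavenge starts as s.scavenged,
// so an already-scavenged span with no free neighbors is scavenged again.
func freeOld(s, before, after *span) (scavengeCalls int) {
	needsScavenge := s.scavenged // pre-patch initialization
	if before != nil && before.scavenged {
		needsScavenge = true
	}
	if after != nil && after.scavenged {
		needsScavenge = true
	}
	if needsScavenge {
		scavengeCalls++ // stands in for a potentially expensive madvise
	}
	return
}

// freeNew models the patched logic: needsScavenge starts false and is only
// raised while actually coalescing, so an isolated span is left alone.
func freeNew(s, before, after *span) (scavengeCalls int) {
	needsScavenge := false
	if before != nil {
		needsScavenge = needsScavenge || before.scavenged || s.scavenged
	}
	if after != nil {
		needsScavenge = needsScavenge || after.scavenged || s.scavenged
	}
	if needsScavenge {
		scavengeCalls++
	}
	return
}

func main() {
	s := &span{scavenged: true}
	// No neighbors to coalesce with: only the old logic scavenges again.
	fmt.Println("old:", freeOld(s, nil, nil)) // old: 1 (redundant scavenge)
	fmt.Println("new:", freeNew(s, nil, nil)) // new: 0 (span left as-is)
}

When a scavenged neighbor does exist, both versions still scavenge the
coalesced result, so the fix only removes the no-coalescing case that the
commit message identifies as common on the trim and grow paths.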
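The patch also hoists prescavenged += before.released() (and the after
equivalent) out of the scavenged-only branch. That is safe because released()
reports zero for a span that was never scavenged, so the unconditional add
contributes nothing for neighbors coming off the free treap. A sketch of that
invariant follows, with released() written in the style of mspan.released;
the page size constant and field names here are assumptions, not the
runtime's definitions.

package main

import "fmt"

const pageSize = 8192 // assumption: the runtime's page size on most platforms

// span is again a hypothetical stand-in for mspan.
type span struct {
	npages    uintptr
	scavenged bool
}

// released reports how many bytes of the span were returned to the OS:
// zero unless the span is marked scavenged.
func (s *span) released() uintptr {
	if !s.scavenged {
		return 0
	}
	return s.npages * pageSize
}

func main() {
	scavNeighbor := &span{npages: 4, scavenged: true}
	freeNeighbor := &span{npages: 4, scavenged: false}

	// Unconditional accounting, as in the patched code: the non-scavenged
	// neighbor contributes 0, so no if/else around the add is needed.
	var prescavenged uintptr
	prescavenged += scavNeighbor.released()
	prescavenged += freeNeighbor.released()
	fmt.Println(prescavenged) // 32768: only the scavenged neighbor counts
}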