Cypherpunks repositories - gostls13.git/commitdiff
runtime: stop unnecessary span scavenges on free
author     Michael Anthony Knyszek <mknyszek@google.com>
           Tue, 6 Nov 2018 00:10:33 +0000 (00:10 +0000)
committer  Michael Knyszek <mknyszek@google.com>
           Fri, 9 Nov 2018 20:57:57 +0000 (20:57 +0000)
This change fixes a bug wherein freeing a scavenged span that didn't
coalesce with any neighboring spans would result in that span getting
scavenged again. This case may actually be a common occurrence, because
span trimmings and newly-grown spans are "freed" through the same
codepath. On systems where madvise is relatively expensive, this can
have a large performance impact.
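
To make the bug concrete, here is a minimal sketch (hypothetical,
simplified names; the real logic is freeSpanLocked in
src/runtime/mheap.go, shown in the diff below) contrasting the old and
new initialization of needsScavenge:

package main

import "fmt"

// span is a simplified, hypothetical stand-in for runtime.mspan;
// only the field relevant to the bug is modeled here.
type span struct {
	scavenged bool // true if the span's pages were already madvised away
}

// freeBefore models the old logic: needsScavenge is seeded with
// s.scavenged, so an already-scavenged span that coalesces with
// nothing still gets scavenged (madvised) a second time on free.
func freeBefore(s *span, coalesced bool) (scavengeAgain bool) {
	// coalesced is irrelevant here: even with no merge, this is true.
	needsScavenge := s.scavenged
	return needsScavenge
}

// freeAfter models the fixed logic: needsScavenge starts false and is
// only raised while coalescing, when scavenged and unscavenged pages
// actually end up mixed in the same span.
func freeAfter(s *span, coalesced bool) (scavengeAgain bool) {
	needsScavenge := false
	if coalesced {
		// In the real code this also ORs in the neighbor's scavenged bit.
		needsScavenge = needsScavenge || s.scavenged
	}
	return needsScavenge
}

func main() {
	s := &span{scavenged: true} // e.g. a trimmed-off remainder of a scavenged span
	fmt.Println(freeBefore(s, false)) // true: redundant madvise
	fmt.Println(freeAfter(s, false))  // false: the span is left as-is
}

In the non-coalescing case the span's pages have already been returned
to the OS, so scavenging it again only buys a redundant madvise call.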

This change also cleans up some of this logic in freeSpanLocked, since
a number of factors made the coalescing code difficult to reason about
with respect to scavenging. Notably, the way the needsScavenge boolean
was handled could be better expressed, and the inverted conditions
(e.g. !after.scavenged) made things even more confusing.
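
The cleaned-up per-neighbor bookkeeping can be sketched as follows
(hypothetical, simplified types; the real code manipulates *mspan
values and the h.free/h.scav treaps, as the diff below shows):

package main

import "fmt"

// Simplified, hypothetical stand-ins for the runtime's types.
type span struct {
	scavenged bool
	released  uintptr // bytes of the span already returned to the OS
}

type treap struct{ name string }

func (t *treap) removeSpan(s *span) { fmt.Printf("removed span from %s treap\n", t.name) }

// mergeNeighbor models the per-neighbor bookkeeping the diff settles
// on: track the scavenge state positively first, then use a single
// non-inverted branch to pick the treap the neighbor is removed from.
func mergeNeighbor(s, neighbor *span, free, scav *treap, needsScavenge *bool, prescavenged *uintptr) {
	// If neighbor or s is scavenged, the final coalesced span must be
	// scavenged too, since it now mixes scavenged and unscavenged pages.
	*needsScavenge = *needsScavenge || neighbor.scavenged || s.scavenged
	*prescavenged += neighbor.released
	if neighbor.scavenged {
		scav.removeSpan(neighbor)
	} else {
		free.removeSpan(neighbor)
	}
}

func main() {
	free, scav := &treap{name: "free"}, &treap{name: "scav"}
	s := &span{}
	n := &span{scavenged: true, released: 8 << 10}
	needsScavenge, prescavenged := false, uintptr(0)
	mergeNeighbor(s, n, free, scav, &needsScavenge, &prescavenged)
	fmt.Println(needsScavenge, prescavenged) // true 8192
}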

Fixes #28595.

Change-Id: I75228dba70b6596b90853020b7c24fbe7ab937cf
Reviewed-on: https://go-review.googlesource.com/c/147559
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
src/runtime/mheap.go

index 43f59adb8af281160a025452b2ab82b59451160d..9b121c63a1ca6a0d443f08fa4d35b67335dc2e8e 100644
@@ -1054,7 +1054,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 
        // We scavenge s at the end after coalescing if s or anything
        // it merged with is marked scavenged.
-       needsScavenge := s.scavenged
+       needsScavenge := false
        prescavenged := s.released() // number of bytes already scavenged.
 
        // Coalesce with earlier, later spans.
@@ -1064,14 +1064,15 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
                s.npages += before.npages
                s.needzero |= before.needzero
                h.setSpan(before.base(), s)
+               // If before or s are scavenged, then we need to scavenge the final coalesced span.
+               needsScavenge = needsScavenge || before.scavenged || s.scavenged
+               prescavenged += before.released()
                // The size is potentially changing so the treap needs to delete adjacent nodes and
                // insert back as a combined node.
-               if !before.scavenged {
-                       h.free.removeSpan(before)
-               } else {
+               if before.scavenged {
                        h.scav.removeSpan(before)
-                       needsScavenge = true
-                       prescavenged += before.released()
+               } else {
+                       h.free.removeSpan(before)
                }
                before.state = mSpanDead
                h.spanalloc.free(unsafe.Pointer(before))
@@ -1082,12 +1083,12 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
                s.npages += after.npages
                s.needzero |= after.needzero
                h.setSpan(s.base()+s.npages*pageSize-1, s)
-               if !after.scavenged {
-                       h.free.removeSpan(after)
-               } else {
+               needsScavenge = needsScavenge || after.scavenged || s.scavenged
+               prescavenged += after.released()
+               if after.scavenged {
                        h.scav.removeSpan(after)
-                       needsScavenge = true
-                       prescavenged += after.released()
+               } else {
+                       h.free.removeSpan(after)
                }
                after.state = mSpanDead
                h.spanalloc.free(unsafe.Pointer(after))