runtime: de-duplicate coalescing code
author     Michael Anthony Knyszek <mknyszek@google.com>
           Tue, 15 Jan 2019 23:48:57 +0000 (23:48 +0000)
committer  Austin Clements <austin@google.com>
           Thu, 17 Jan 2019 17:05:37 +0000 (17:05 +0000)
Currently the coalescing code is duplicated between its two paths:
merging with the span before the one being coalesced, and merging with
the span after it. This change factors the shared portions of these
code paths out into a local closure that acts as a helper.
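
For illustration, the pattern reads roughly like the standalone sketch
below. The span struct, the coalesce signature, and the surrounding main
are simplified stand-ins invented for this example; the real runtime
helper additionally updates heap metadata via h.setSpan, removes the
neighbor from the free or scavenged treap, and frees the dead mspan.

package main

import "fmt"

// span is a simplified stand-in for runtime.mspan; only the fields
// touched by coalescing are modeled here.
type span struct {
	startAddr uintptr
	npages    uintptr
	needzero  uint8
	scavenged bool
}

// coalesce mirrors the shape of the refactored mheap.coalesce: the
// shared merge logic lives in a single local closure, and the two
// call sites differ only in which neighbor they pass in.
func coalesce(s, before, after *span) {
	needsScavenge := false

	// merge folds other into s and records whether the combined
	// span still needs a scavenge pass.
	merge := func(other *span) {
		if other.startAddr < s.startAddr {
			s.startAddr = other.startAddr
		}
		s.npages += other.npages
		s.needzero |= other.needzero
		needsScavenge = needsScavenge || other.scavenged || s.scavenged
	}

	if before != nil {
		merge(before)
	}
	if after != nil {
		merge(after)
	}
	fmt.Printf("coalesced: base=%#x npages=%d needsScavenge=%v\n",
		s.startAddr, s.npages, needsScavenge)
}

func main() {
	s := &span{startAddr: 0x2000, npages: 2}
	before := &span{startAddr: 0x1000, npages: 1, scavenged: true}
	after := &span{startAddr: 0x4000, npages: 3}
	coalesce(s, before, after) // coalesced: base=0x1000 npages=6 needsScavenge=true
}

Because the closure captures s and needsScavenge from the enclosing
scope, each call site in the real change shrinks to a merge(...) call
plus the one step that genuinely differs between the two directions:
which h.setSpan boundary to update.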

Change-Id: I7919fbed3f9a833eafb324a21a4beaa81f2eaa91
Reviewed-on: https://go-review.googlesource.com/c/158077
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
src/runtime/mheap.go

index d4096624514609f3e6f816baa69930017a7922bc..6a7f9bacdbd9df6ef479df21d2c436451ae23f27 100644 (file)
@@ -425,41 +425,41 @@ func (h *mheap) coalesce(s *mspan) {
        needsScavenge := false
        prescavenged := s.released() // number of bytes already scavenged.
 
-       // Coalesce with earlier, later spans.
-       if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
-               // Now adjust s.
-               s.startAddr = before.startAddr
-               s.npages += before.npages
-               s.needzero |= before.needzero
-               h.setSpan(before.base(), s)
+       // merge is a helper which merges other into s, deletes references to other
+       // in heap metadata, and then discards it.
+       merge := func(other *mspan) {
+               // Adjust s via base and npages.
+               if other.startAddr < s.startAddr {
+                       s.startAddr = other.startAddr
+               }
+               s.npages += other.npages
+               s.needzero |= other.needzero
+
                // If other or s are scavenged, then we need to scavenge the final coalesced span.
-               needsScavenge = needsScavenge || before.scavenged || s.scavenged
-               prescavenged += before.released()
+               needsScavenge = needsScavenge || other.scavenged || s.scavenged
+               prescavenged += other.released()
+
                // The size is potentially changing so the treap needs to delete adjacent nodes and
                // insert back as a combined node.
-               if before.scavenged {
-                       h.scav.removeSpan(before)
+               if other.scavenged {
+                       h.scav.removeSpan(other)
                } else {
-                       h.free.removeSpan(before)
+                       h.free.removeSpan(other)
                }
-               before.state = mSpanDead
-               h.spanalloc.free(unsafe.Pointer(before))
+               other.state = mSpanDead
+               h.spanalloc.free(unsafe.Pointer(other))
+       }
+
+       // Coalesce with earlier, later spans.
+       if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
+               merge(before)
+               h.setSpan(s.base(), s)
        }
 
        // Now check to see if next (greater addresses) span is free and can be coalesced.
        if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
-               s.npages += after.npages
-               s.needzero |= after.needzero
+               merge(after)
                h.setSpan(s.base()+s.npages*pageSize-1, s)
-               needsScavenge = needsScavenge || after.scavenged || s.scavenged
-               prescavenged += after.released()
-               if after.scavenged {
-                       h.scav.removeSpan(after)
-               } else {
-                       h.free.removeSpan(after)
-               }
-               after.state = mSpanDead
-               h.spanalloc.free(unsafe.Pointer(after))
        }
 
        if needsScavenge {