runtime: de-duplicate span scavenging
author    Michael Anthony Knyszek <mknyszek@google.com>
Tue, 2 Oct 2018 20:23:41 +0000 (20:23 +0000)
committer Michael Knyszek <mknyszek@google.com>
Wed, 17 Oct 2018 16:25:42 +0000 (16:25 +0000)
Currently, span scavenging is done nearly identically in two different
locations. This change deduplicates that logic into one shared routine,
mspan.scavenge.

For #14045.

Change-Id: I15006b2c9af0e70b7a9eae9abb4168d3adca3860
Reviewed-on: https://go-review.googlesource.com/c/139297
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
src/runtime/mheap.go

index 48b3f5364aa5bdbb8cd9e6afe68ecf5e8bf6a516..e35a8331fabd71e5f3a475a61049673e43c8d645 100644 (file)
@@ -351,6 +351,34 @@ func (s *mspan) layout() (size, n, total uintptr) {
        return
 }
 
+func (s *mspan) scavenge() uintptr {
+       start := s.base()
+       end := start + s.npages<<_PageShift
+       if physPageSize > _PageSize {
+               // We can only release pages in
+               // physPageSize blocks, so round start
+               // and end in. (Otherwise, madvise
+               // will round them *out* and release
+               // more memory than we want.)
+               start = (start + physPageSize - 1) &^ (physPageSize - 1)
+               end &^= physPageSize - 1
+               if end <= start {
+                       // start and end don't span a
+                       // whole physical page.
+                       return 0
+               }
+       }
+       len := end - start
+       released := len - (s.npreleased << _PageShift)
+       if physPageSize > _PageSize && released == 0 {
+               return 0
+       }
+       memstats.heap_released += uint64(released)
+       s.npreleased = len >> _PageShift
+       sysUnused(unsafe.Pointer(start), len)
+       return released
+}
+
 // recordspan adds a newly allocated span to h.allspans.
 //
 // This only happens the first time a span is allocated from
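The core of the new routine is the round-in arithmetic: sysUnused (madvise on
most platforms) operates on whole physical pages, so when the physical page
size exceeds the runtime's page size, the span's bounds must be rounded inward
or memory outside the span would be released. A minimal standalone sketch of
that arithmetic follows; the sizes are hypothetical (8 KiB runtime pages and
16 KiB physical pages, as on some arm64 systems), not values from the diff.

package main

import "fmt"

const (
	pageSize     = 8 << 10  // stand-in for _PageSize
	physPageSize = 16 << 10 // stand-in physical page size
)

// roundIn mirrors the diff's rounding: start rounds up and end rounds
// down to physical page boundaries, so only memory that lies entirely
// inside the span is ever released.
func roundIn(start, end uintptr) (lo, hi uintptr, ok bool) {
	lo = (start + physPageSize - 1) &^ (physPageSize - 1)
	hi = end &^ (physPageSize - 1)
	return lo, hi, lo < hi
}

func main() {
	start := uintptr(0xA000)  // hypothetical span base
	end := start + 3*pageSize // 3 runtime pages, ends at 0x10000
	lo, hi, ok := roundIn(start, end)
	fmt.Printf("span [%#x, %#x) -> releasable [%#x, %#x) ok=%v\n",
		start, end, lo, hi, ok)
	// prints: span [0xa000, 0x10000) -> releasable [0xc000, 0x10000) ok=true
}

Note the early-out in the diff when lo >= hi: a span smaller than one
physical page has nothing that can be safely released.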
@@ -1087,35 +1115,12 @@ func (h *mheap) busyList(npages uintptr) *mSpanList {
 
 func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
        s := t.spanKey
-       var sumreleased uintptr
        if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
-               start := s.base()
-               end := start + s.npages<<_PageShift
-               if physPageSize > _PageSize {
-                       // We can only release pages in
-                       // physPageSize blocks, so round start
-                       // and end in. (Otherwise, madvise
-                       // will round them *out* and release
-                       // more memory than we want.)
-                       start = (start + physPageSize - 1) &^ (physPageSize - 1)
-                       end &^= physPageSize - 1
-                       if end <= start {
-                               // start and end don't span a
-                               // whole physical page.
-                               return sumreleased
-                       }
+               if released := s.scavenge(); released != 0 {
+                       return released
                }
-               len := end - start
-               released := len - (s.npreleased << _PageShift)
-               if physPageSize > _PageSize && released == 0 {
-                       return sumreleased
-               }
-               memstats.heap_released += uint64(released)
-               sumreleased += released
-               s.npreleased = len >> _PageShift
-               sysUnused(unsafe.Pointer(start), len)
        }
-       return sumreleased
+       return 0
 }
 
 func scavengelist(list *mSpanList, now, limit uint64) uintptr {
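With the shared routine in place, scavengeTreapNode handles exactly one treap
node, so the old sumreleased accumulator is gone and summing across nodes
falls to the caller. The caller is not shown in this diff; the sketch below
is a self-contained illustration of such a recursive walk, with stand-in
types rather than the runtime's real treapNode.

package main

import "fmt"

// treapNode is a stand-in; released represents what scavenging the
// node's span would return.
type treapNode struct {
	released    uintptr
	left, right *treapNode
}

// scavengeTreap sums per-node results, the accumulation that the
// revised scavengeTreapNode leaves to its caller.
func scavengeTreap(t *treapNode) uintptr {
	if t == nil {
		return 0
	}
	return t.released + scavengeTreap(t.left) + scavengeTreap(t.right)
}

func main() {
	root := &treapNode{
		released: 8 << 10,
		left:     &treapNode{released: 16 << 10},
		right:    &treapNode{},
	}
	fmt.Println(scavengeTreap(root)) // 24576
}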
@@ -1128,32 +1133,7 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
                if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages {
                        continue
                }
-               start := s.base()
-               end := start + s.npages<<_PageShift
-               if physPageSize > _PageSize {
-                       // We can only release pages in
-                       // physPageSize blocks, so round start
-                       // and end in. (Otherwise, madvise
-                       // will round them *out* and release
-                       // more memory than we want.)
-                       start = (start + physPageSize - 1) &^ (physPageSize - 1)
-                       end &^= physPageSize - 1
-                       if end <= start {
-                               // start and end don't span a
-                               // whole physical page.
-                               continue
-                       }
-               }
-               len := end - start
-
-               released := len - (s.npreleased << _PageShift)
-               if physPageSize > _PageSize && released == 0 {
-                       continue
-               }
-               memstats.heap_released += uint64(released)
-               sumreleased += released
-               s.npreleased = len >> _PageShift
-               sysUnused(unsafe.Pointer(start), len)
+               sumreleased += s.scavenge()
        }
        return sumreleased
 }
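One property makes the unconditional sumreleased += s.scavenge() safe here:
scavenge reports only the delta beyond what was already released, because it
subtracts s.npreleased << _PageShift before updating npreleased, so each byte
is counted at most once. A small self-contained sketch of that bookkeeping,
with stand-in names and page size and without the physical-page rounding:

package main

import "fmt"

const pageShift = 13 // hypothetical 8 KiB pages

type span struct {
	npages     uintptr
	npreleased uintptr
}

// scavenge mirrors the diff's delta bookkeeping: it reports only newly
// released bytes, then records that the whole span is now released.
func (s *span) scavenge() uintptr {
	length := s.npages << pageShift
	released := length - s.npreleased<<pageShift
	s.npreleased = length >> pageShift
	return released
}

func main() {
	s := &span{npages: 4, npreleased: 1} // one page released earlier
	fmt.Println(s.scavenge())            // 24576: only 3 new pages counted
	fmt.Println(s.scavenge())            // 0: a second call is a no-op
}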