runtime: scavenge memory on physical page-aligned boundaries
author Austin Clements <austin@google.com>
Thu, 14 Apr 2016 17:41:36 +0000 (13:41 -0400)
committer Austin Clements <austin@google.com>
Sat, 16 Apr 2016 21:42:43 +0000 (21:42 +0000)
Currently the scavenger marks memory unused in multiples of the
allocator page size (8K). This is safe as long as the true physical
page size is 4K (or 8K), as it is on many platforms. However, on
ARM64, PPC64x, and MIPS64, the physical page size is larger than 8K,
so if we attempt to mark memory unused, the kernel will round the
boundaries of the region *out* to all pages covered by the requested
region, and we'll release a larger region of memory than intended. As
a result, the scavenger is currently disabled on these platforms.
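(Illustrative numbers, not from the original message: with a 64K
physical page size, a request to mark the 8K range [0x12000, 0x14000)
unused falls inside the physical page [0x10000, 0x20000), so the
kernel discards all 64K and takes 56K of live neighboring heap data
with it.)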

Fix this by first rounding the region to be marked unused *in* to
multiples of the physical page size, so that when we ask the kernel to
mark it unused, it releases exactly the requested region.
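
A minimal standalone sketch of the round-in arithmetic (hypothetical
helper for illustration only; in the actual change the logic is
inlined in scavengelist, below):

    // roundIn shrinks [start, end) to the largest sub-range whose
    // boundaries are multiples of physPageSize, which must be a power
    // of two. The result is empty when the range does not cover a
    // full physical page.
    func roundIn(start, end, physPageSize uintptr) (uintptr, uintptr) {
            start = (start + physPageSize - 1) &^ (physPageSize - 1) // round start *up*
            end &^= physPageSize - 1                                 // round end *down*
            if start >= end {
                    return 0, 0 // nothing can be released safely
            }
            return start, end
    }

With 64K physical pages, roundIn(0x12000, 0x32000, 0x10000) returns
[0x20000, 0x30000): of a 128K span, only the interior 64K lies on
physical page boundaries and can be handed back to the kernel.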

Fixes #9993.

Change-Id: I96d5fdc2f77f9d69abadcea29bcfe55e68288cb1
Reviewed-on: https://go-review.googlesource.com/22066
Reviewed-by: Rick Hudson <rlh@golang.org>
src/runtime/mheap.go

index 895af9f07c2da52a767437c4f3177b4d53a9cb74..99f7b54fc8ebe82029b01391f7999f493145ccab 100644
@@ -824,15 +824,6 @@ func (h *mheap) busyList(npages uintptr) *mSpanList {
 }
 
 func scavengelist(list *mSpanList, now, limit uint64) uintptr {
-       if sys.PhysPageSize > _PageSize {
-               // golang.org/issue/9993
-               // If the physical page size of the machine is larger than
-               // our logical heap page size the kernel may round up the
-               // amount to be freed to its page size and corrupt the heap
-               // pages surrounding the unused block.
-               return 0
-       }
-
        if list.isEmpty() {
                return 0
        }
@@ -840,11 +831,30 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
        var sumreleased uintptr
        for s := list.first; s != nil; s = s.next {
                if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
-                       released := (s.npages - s.npreleased) << _PageShift
+                       start := uintptr(s.start) << _PageShift
+                       end := start + s.npages<<_PageShift
+                       if sys.PhysPageSize > _PageSize {
+                               // We can only release pages in
+                               // PhysPageSize blocks, so round start
+                               // and end in. (Otherwise, madvise
+                               // will round them *out* and release
+                               // more memory than we want.)
+                               start = (start + sys.PhysPageSize - 1) &^ (sys.PhysPageSize - 1)
+                               end &^= sys.PhysPageSize - 1
+                               if start == end {
+                                       continue
+                               }
+                       }
+                       len := end - start
+
+                       released := len - (s.npreleased << _PageShift)
+                       if sys.PhysPageSize > _PageSize && released == 0 {
+                               continue
+                       }
                        memstats.heap_released += uint64(released)
                        sumreleased += released
-                       s.npreleased = s.npages
-                       sysUnused(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+                       s.npreleased = len >> _PageShift
+                       sysUnused(unsafe.Pointer(start), len)
                }
        }
        return sumreleased
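
A note on the accounting in the new code (my reading of the diff, not
part of the commit message): npreleased now records len >> _PageShift,
the number of allocator pages in the aligned region actually handed to
sysUnused, rather than s.npages, and released counts only the bytes
newly returned beyond what npreleased already covered. The extra
released == 0 check then skips spans whose aligned interior was
already released on an earlier pass. For example (illustrative): the
128K span above releases 64K on the first pass, setting npreleased to
8 allocator pages; on the next pass len >> _PageShift equals
npreleased, released is 0, and the redundant sysUnused call is
avoided.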