From f4a5ae5594a21ea276d473fe9f804a30adbd8d07 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Mon, 29 Apr 2019 21:02:18 +0000
Subject: [PATCH] runtime: track the number of free unscavenged huge pages

This change tracks the number of potential free and unscavenged huge
pages which will be used to inform the rate at which scavenging should
occur.

For #30333.

Change-Id: I47663e5ffb64cac44ffa10db158486783f707479
Reviewed-on: https://go-review.googlesource.com/c/go/+/170860
Run-TryBot: Michael Knyszek
TryBot-Result: Gobot Gobot
Reviewed-by: Austin Clements
---
 src/runtime/export_test.go | 22 ++++++++++++++++++++++
 src/runtime/gc_test.go     | 19 +++++++++++++++++++
 src/runtime/mgclarge.go    |  9 ++++++++-
 src/runtime/mheap.go       |  2 +-
 4 files changed, 50 insertions(+), 2 deletions(-)

diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 852f37409e..3c3e110f89 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -36,6 +36,8 @@ var Atoi32 = atoi32
 
 var Nanotime = nanotime
 
+var PhysHugePageSize = physHugePageSize
+
 type LFNode struct {
 	Next    uint64
 	Pushcnt uintptr
@@ -516,6 +518,26 @@ func MapTombstoneCheck(m map[int]int) {
 	}
 }
 
+// UnscavHugePagesSlow returns the value of mheap_.free.unscavHugePages
+// and the number of unscavenged huge pages calculated by
+// scanning the heap.
+func UnscavHugePagesSlow() (uintptr, uintptr) {
+	var base, slow uintptr
+	// Run on the system stack to avoid deadlock from stack growth
+	// trying to acquire the heap lock.
+	systemstack(func() {
+		lock(&mheap_.lock)
+		base = mheap_.free.unscavHugePages
+		for _, s := range mheap_.allspans {
+			if s.state == mSpanFree && !s.scavenged {
+				slow += s.hugePages()
+			}
+		}
+		unlock(&mheap_.lock)
+	})
+	return base, slow
+}
+
 // Span is a safe wrapper around an mspan, whose memory
 // is managed manually.
 type Span struct {
diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go
index 51e8ea4d31..d55a934519 100644
--- a/src/runtime/gc_test.go
+++ b/src/runtime/gc_test.go
@@ -470,6 +470,25 @@ func TestReadMemStats(t *testing.T) {
 	}
 }
 
+func TestUnscavHugePages(t *testing.T) {
+	// Allocate 20 MiB and immediately free it a few times to increase
+	// the chance that unscavHugePages isn't zero and that some kind of
+	// accounting had to happen in the runtime.
+	for j := 0; j < 3; j++ {
+		var large [][]byte
+		for i := 0; i < 5; i++ {
+			large = append(large, make([]byte, runtime.PhysHugePageSize))
+		}
+		runtime.KeepAlive(large)
+		runtime.GC()
+	}
+	base, slow := runtime.UnscavHugePagesSlow()
+	if base != slow {
+		logDiff(t, "unscavHugePages", reflect.ValueOf(base), reflect.ValueOf(slow))
+		t.Fatal("unscavHugePages mismatch")
+	}
+}
+
 func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
 	typ := got.Type()
 	switch typ.Kind() {
diff --git a/src/runtime/mgclarge.go b/src/runtime/mgclarge.go
index b1e7c23e25..857bc6108a 100644
--- a/src/runtime/mgclarge.go
+++ b/src/runtime/mgclarge.go
@@ -40,7 +40,8 @@ import (
 
 //go:notinheap
 type mTreap struct {
-	treap *treapNode
+	treap           *treapNode
+	unscavHugePages uintptr // number of unscavenged huge pages in the treap
 }
 
 //go:notinheap
@@ -378,6 +379,9 @@ func (root *mTreap) end(mask, match treapIterType) treapIter {
 
 // insert adds span to the large span treap.
 func (root *mTreap) insert(span *mspan) {
+	if !span.scavenged {
+		root.unscavHugePages += span.hugePages()
+	}
 	base := span.base()
 	var last *treapNode
 	pt := &root.treap
@@ -435,6 +439,9 @@ func (root *mTreap) insert(span *mspan) {
 }
 
 func (root *mTreap) removeNode(t *treapNode) {
+	if !t.span.scavenged {
+		root.unscavHugePages -= t.span.hugePages()
+	}
 	if t.span.base() != t.key {
 		throw("span and treap node base addresses do not match")
 	}
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index b14a28fc13..d033a9d026 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -59,7 +59,7 @@ type mheap struct {
 	// on the swept stack.
 	sweepSpans [2]gcSweepBuf
 
-	// _ uint32 // align uint64 fields on 32-bit for atomics
+	_ uint32 // align uint64 fields on 32-bit for atomics
 
 	// Proportional sweep
 	//
-- 
2.50.0
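
Editor's note: the patch above keeps mTreap.unscavHugePages current by adding
span.hugePages() when an unscavenged span is inserted and subtracting it when
the span's node is removed, where hugePages() is the number of aligned physical
huge pages lying entirely inside the span. The following is a minimal standalone
sketch of that counting idea, not the runtime's actual implementation; the
helper name hugePagesIn is hypothetical, and the 8 KiB page size and 2 MiB huge
page size are assumptions for illustration only.

package main

import "fmt"

const (
	pageSize         = 8 << 10 // assumed runtime page size
	physHugePageSize = 2 << 20 // assumed physical huge page size (2 MiB)
)

// hugePagesIn counts how many huge-page-aligned, huge-page-sized regions lie
// entirely within the address range [base, base+npages*pageSize): round the
// start up and the end down to huge page boundaries, then divide.
func hugePagesIn(base, npages uintptr) uintptr {
	start := (base + physHugePageSize - 1) &^ (physHugePageSize - 1)
	end := (base + npages*pageSize) &^ uintptr(physHugePageSize-1)
	if start < end {
		return (end - start) / physHugePageSize
	}
	return 0
}

func main() {
	// A 6 MiB free span starting 1 MiB past a huge page boundary fully
	// covers two aligned 2 MiB huge pages, so it would contribute 2 to
	// the unscavenged huge page count while it remains unscavenged.
	fmt.Println(hugePagesIn(1<<20, (6<<20)/pageSize)) // prints 2
}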