var Nanotime = nanotime
+var PhysHugePageSize = physHugePageSize
+
type LFNode struct {
Next    uint64
Pushcnt uintptr
}
}
+// UnscavHugePagesSlow returns the value of mheap_.free.unscavHugePages
+// and the number of unscavenged huge pages calculated by
+// scanning the heap.
+func UnscavHugePagesSlow() (uintptr, uintptr) {
+ var base, slow uintptr
+ // Run on the system stack to avoid deadlock from stack growth
+ // trying to acquire the heap lock.
+ systemstack(func() {
+ lock(&mheap_.lock)
+ base = mheap_.free.unscavHugePages
+ for _, s := range mheap_.allspans {
+ if s.state == mSpanFree && !s.scavenged {
+ slow += s.hugePages()
+ }
+ }
+ unlock(&mheap_.lock)
+ })
+ return base, slow
+}
+
// Span is a safe wrapper around an mspan, whose memory
// is managed manually.
type Span struct {
}
}
+func TestUnscavHugePages(t *testing.T) {
+ // Allocate a few huge pages' worth of memory and immediately free
+ // it a few times to increase the chance that unscavHugePages isn't
+ // zero and that some kind of accounting had to happen in the runtime.
+ for j := 0; j < 3; j++ {
+ var large [][]byte
+ for i := 0; i < 5; i++ {
+ large = append(large, make([]byte, runtime.PhysHugePageSize))
+ }
+ runtime.KeepAlive(large)
+ runtime.GC()
+ }
+ base, slow := runtime.UnscavHugePagesSlow()
+ if base != slow {
+ logDiff(t, "unscavHugePages", reflect.ValueOf(base), reflect.ValueOf(slow))
+ t.Fatal("unscavHugePages mismatch")
+ }
+}
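The base/slow comparison mirrors the other ...Slow helpers in export_test.go (for example ReadMemStatsSlow): a counter maintained incrementally by the runtime is checked against a value recomputed from scratch under the heap lock. The runtime.GC() call matters here because freed large spans only return to mheap_.free, where both counts can observe them, once they have been swept.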
+
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
typ := got.Type()
switch typ.Kind() {
//go:notinheap
type mTreap struct {
- treap *treapNode
+ treap           *treapNode
+ unscavHugePages uintptr // number of unscavenged huge pages in the treap
}
//go:notinheap
// insert adds span to the large span treap.
func (root *mTreap) insert(span *mspan) {
+ if !span.scavenged {
+ root.unscavHugePages += span.hugePages()
+ }
base := span.base()
var last *treapNode
pt := &root.treap
}
func (root *mTreap) removeNode(t *treapNode) {
+ if !t.span.scavenged {
+ root.unscavHugePages -= t.span.hugePages()
+ }
if t.span.base() != t.key {
throw("span and treap node base addresses do not match")
}
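Both accounting sites above, as well as UnscavHugePagesSlow, rely on (*mspan).hugePages, which is not shown in these hunks. A sketch of what it has to compute, assuming physHugePageSize is zero or a power of two (a reconstruction for illustration, not necessarily the exact upstream code):

	// hugePages returns the number of aligned physical huge pages
	// fully contained in the span's memory.
	func (s *mspan) hugePages() uintptr {
		if physHugePageSize == 0 || s.npages < physHugePageSize/pageSize {
			return 0
		}
		start := s.base()
		end := start + s.npages*pageSize
		// Round start up and end down to huge page boundaries so that
		// only whole, aligned huge pages are counted.
		start = (start + physHugePageSize - 1) &^ (physHugePageSize - 1)
		end &^= physHugePageSize - 1
		if start < end {
			return (end - start) / physHugePageSize
		}
		return 0
	}

Counting only fully contained, aligned huge pages keeps insert and removeNode symmetric: a span contributes the same amount when it leaves the treap as it did when it entered, so the counter cannot drift.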
// on the swept stack.
sweepSpans [2]gcSweepBuf
- // _ uint32 // align uint64 fields on 32-bit for atomics
+ _ uint32 // align uint64 fields on 32-bit for atomics
// Proportional sweep
//