Cypherpunks repositories - gostls13.git/commitdiff
runtime: remove ptr/scalar bitmap metric
author    khr@golang.org <khr@golang.org>
          Thu, 8 May 2025 17:00:22 +0000 (10:00 -0700)
committer Gopher Robot <gobot@golang.org>
          Thu, 8 May 2025 18:21:51 +0000 (11:21 -0700)
We no longer use this mechanism (since CL 616255), so the metric
will always be zero.

Updates #73628

Change-Id: Ic179927a8bc24e6291876c218d88e8848b057c2a
Reviewed-on: https://go-review.googlesource.com/c/go/+/671096
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Auto-Submit: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
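
This CL only drops the internal inPtrScalarBits accounting; reading the exported
memory-class metrics from user code is unchanged. A minimal sketch of sampling
those metrics via runtime/metrics (the /memory/classes/ prefix filter is an
illustrative assumption, not something this CL touches):

package main

import (
	"fmt"
	"runtime/metrics"
	"strings"
)

func main() {
	// Discover every supported metric, then sample only the
	// /memory/classes/ ones that the heap stats below feed into.
	var samples []metrics.Sample
	for _, d := range metrics.All() {
		if strings.HasPrefix(d.Name, "/memory/classes/") {
			samples = append(samples, metrics.Sample{Name: d.Name})
		}
	}
	metrics.Read(samples)
	for _, s := range samples {
		if s.Value.Kind() == metrics.KindUint64 {
			fmt.Printf("%-45s %d bytes\n", s.Name, s.Value.Uint64())
		}
	}
}

After this change, the ptr/scalar bitmap contribution to these classes is always
zero, since the spans that backed it are no longer allocated.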

src/runtime/align_runtime_test.go
src/runtime/metrics.go
src/runtime/mheap.go
src/runtime/mstats.go

diff --git a/src/runtime/align_runtime_test.go b/src/runtime/align_runtime_test.go
index 6d77e0d3d4a2fb8bcd151b1ee06d9fa730c6925f..4bcb49db2f51dce9e7da0fedcec4d94cfc270b61 100644
--- a/src/runtime/align_runtime_test.go
+++ b/src/runtime/align_runtime_test.go
@@ -28,7 +28,6 @@ var AtomicFields = []uintptr{
        unsafe.Offsetof(heapStatsDelta{}.released),
        unsafe.Offsetof(heapStatsDelta{}.inHeap),
        unsafe.Offsetof(heapStatsDelta{}.inStacks),
-       unsafe.Offsetof(heapStatsDelta{}.inPtrScalarBits),
        unsafe.Offsetof(heapStatsDelta{}.inWorkBufs),
        unsafe.Offsetof(lfnode{}.next),
        unsafe.Offsetof(mstats{}.last_gc_nanotime),
diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go
index 949a2d42bd0da0cd7568d41b8a69d29c7537ddcb..48da745521a27c698adff60bc2c9726634331069 100644
--- a/src/runtime/metrics.go
+++ b/src/runtime/metrics.go
@@ -333,8 +333,7 @@ func initMetrics() {
                        compute: func(in *statAggregate, out *metricValue) {
                                out.kind = metricKindUint64
                                out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
-                                       in.heapStats.inStacks - in.heapStats.inWorkBufs -
-                                       in.heapStats.inPtrScalarBits)
+                                       in.heapStats.inStacks - in.heapStats.inWorkBufs)
                        },
                },
                "/memory/classes/heap/objects:bytes": {
@@ -397,7 +396,7 @@ func initMetrics() {
                        deps: makeStatDepSet(heapStatsDep, sysStatsDep),
                        compute: func(in *statAggregate, out *metricValue) {
                                out.kind = metricKindUint64
-                               out.scalar = uint64(in.heapStats.inWorkBufs+in.heapStats.inPtrScalarBits) + in.sysStats.gcMiscSys
+                               out.scalar = uint64(in.heapStats.inWorkBufs) + in.sysStats.gcMiscSys
                        },
                },
                "/memory/classes/os-stacks:bytes": {
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index dbad51dcbf0e060954b7b4ba3c4f84999b215814..5a27ab5e78f3ed549965c481ce120899b204b6f2 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -979,10 +979,9 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
 type spanAllocType uint8
 
 const (
-       spanAllocHeap          spanAllocType = iota // heap span
-       spanAllocStack                              // stack span
-       spanAllocPtrScalarBits                      // unrolled GC prog bitmap span
-       spanAllocWorkBuf                            // work buf span
+       spanAllocHeap    spanAllocType = iota // heap span
+       spanAllocStack                        // stack span
+       spanAllocWorkBuf                      // work buf span
 )
 
 // manual returns true if the span allocation is manually managed.
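
The manual predicate referenced in the comment above is not part of this hunk;
as a sketch (an assumption based on that comment), with spanAllocPtrScalarBits
gone it reduces to treating every non-heap allocation type as manually managed:

// manual reports whether spans of this allocation type are manually
// managed (e.g. stacks and work bufs) rather than ordinary heap spans.
func (t spanAllocType) manual() bool {
	return t != spanAllocHeap
}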
@@ -1407,8 +1406,6 @@ HaveSpan:
                atomic.Xaddint64(&stats.inHeap, int64(nbytes))
        case spanAllocStack:
                atomic.Xaddint64(&stats.inStacks, int64(nbytes))
-       case spanAllocPtrScalarBits:
-               atomic.Xaddint64(&stats.inPtrScalarBits, int64(nbytes))
        case spanAllocWorkBuf:
                atomic.Xaddint64(&stats.inWorkBufs, int64(nbytes))
        }
@@ -1719,8 +1716,6 @@ func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
                atomic.Xaddint64(&stats.inHeap, -int64(nbytes))
        case spanAllocStack:
                atomic.Xaddint64(&stats.inStacks, -int64(nbytes))
-       case spanAllocPtrScalarBits:
-               atomic.Xaddint64(&stats.inPtrScalarBits, -int64(nbytes))
        case spanAllocWorkBuf:
                atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes))
        }
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index 5507b873e5b2dd8bf17f9a5349fd99fb20688eb7..29ace5ec16f96258b15c51f8592aef58a384e7a0 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -442,12 +442,11 @@ func readmemstats_m(stats *MemStats) {
 
        stackInUse := uint64(consStats.inStacks)
        gcWorkBufInUse := uint64(consStats.inWorkBufs)
-       gcProgPtrScalarBitsInUse := uint64(consStats.inPtrScalarBits)
 
        totalMapped := gcController.heapInUse.load() + gcController.heapFree.load() + gcController.heapReleased.load() +
                memstats.stacks_sys.load() + memstats.mspan_sys.load() + memstats.mcache_sys.load() +
                memstats.buckhash_sys.load() + memstats.gcMiscSys.load() + memstats.other_sys.load() +
-               stackInUse + gcWorkBufInUse + gcProgPtrScalarBitsInUse
+               stackInUse + gcWorkBufInUse
 
        heapGoal := gcController.heapGoal()
 
@@ -461,7 +460,7 @@ func readmemstats_m(stats *MemStats) {
                //
                // * memstats.heapInUse == inHeap
                // * memstats.heapReleased == released
-               // * memstats.heapInUse + memstats.heapFree == committed - inStacks - inWorkBufs - inPtrScalarBits
+               // * memstats.heapInUse + memstats.heapFree == committed - inStacks - inWorkBufs
                // * memstats.totalAlloc == totalAlloc
                // * memstats.totalFree == totalFree
                //
@@ -482,7 +481,7 @@ func readmemstats_m(stats *MemStats) {
                        throw("heapReleased and consistent stats are not equal")
                }
                heapRetained := gcController.heapInUse.load() + gcController.heapFree.load()
-               consRetained := uint64(consStats.committed - consStats.inStacks - consStats.inWorkBufs - consStats.inPtrScalarBits)
+               consRetained := uint64(consStats.committed - consStats.inStacks - consStats.inWorkBufs)
                if heapRetained != consRetained {
                        print("runtime: global value=", heapRetained, "\n")
                        print("runtime: consistent value=", consRetained, "\n")
@@ -533,8 +532,8 @@ func readmemstats_m(stats *MemStats) {
        //
        // or
        //
-       // HeapSys = sys - stacks_inuse - gcWorkBufInUse - gcProgPtrScalarBitsInUse
-       // HeapIdle = sys - stacks_inuse - gcWorkBufInUse - gcProgPtrScalarBitsInUse - heapInUse
+       // HeapSys = sys - stacks_inuse - gcWorkBufInUse
+       // HeapIdle = sys - stacks_inuse - gcWorkBufInUse - heapInUse
        //
        // => HeapIdle = HeapSys - heapInUse = heapFree + heapReleased
        stats.HeapIdle = gcController.heapFree.load() + gcController.heapReleased.load()
@@ -553,7 +552,7 @@ func readmemstats_m(stats *MemStats) {
        // MemStats defines GCSys as an aggregate of all memory related
        // to the memory management system, but we track this memory
        // at a more granular level in the runtime.
-       stats.GCSys = memstats.gcMiscSys.load() + gcWorkBufInUse + gcProgPtrScalarBitsInUse
+       stats.GCSys = memstats.gcMiscSys.load() + gcWorkBufInUse
        stats.OtherSys = memstats.other_sys.load()
        stats.NextGC = heapGoal
        stats.LastGC = memstats.last_gc_unix
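
Putting the two derivations above together, a short sketch (using only the
public runtime API; nothing here is part of this CL) that reads MemStats and
prints the fields whose bookkeeping simplifies once gcProgPtrScalarBitsInUse
is gone:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	// Per the comments above, with the ptr/scalar-bits term removed:
	//   HeapIdle = HeapSys - HeapInuse
	//   GCSys    = gcMiscSys + gcWorkBufInUse (internal quantities)
	fmt.Println("HeapSys:  ", m.HeapSys)
	fmt.Println("HeapInuse:", m.HeapInuse)
	fmt.Println("HeapIdle: ", m.HeapIdle)
	fmt.Println("GCSys:    ", m.GCSys)
	fmt.Println("HeapIdle == HeapSys-HeapInuse:", m.HeapIdle == m.HeapSys-m.HeapInuse)
}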
@@ -678,12 +677,11 @@ func (s *sysMemStat) add(n int64) {
 // consistent with one another.
 type heapStatsDelta struct {
        // Memory stats.
-       committed       int64 // byte delta of memory committed
-       released        int64 // byte delta of released memory generated
-       inHeap          int64 // byte delta of memory placed in the heap
-       inStacks        int64 // byte delta of memory reserved for stacks
-       inWorkBufs      int64 // byte delta of memory reserved for work bufs
-       inPtrScalarBits int64 // byte delta of memory reserved for unrolled GC prog bits
+       committed  int64 // byte delta of memory committed
+       released   int64 // byte delta of released memory generated
+       inHeap     int64 // byte delta of memory placed in the heap
+       inStacks   int64 // byte delta of memory reserved for stacks
+       inWorkBufs int64 // byte delta of memory reserved for work bufs
 
        // Allocator stats.
        //
@@ -709,7 +707,6 @@ func (a *heapStatsDelta) merge(b *heapStatsDelta) {
        a.inHeap += b.inHeap
        a.inStacks += b.inStacks
        a.inWorkBufs += b.inWorkBufs
-       a.inPtrScalarBits += b.inPtrScalarBits
 
        a.tinyAllocCount += b.tinyAllocCount
        a.largeAlloc += b.largeAlloc