"/gc/scan/globals:bytes": {
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
- out.scalar = gcController.globalsScan.Load()
+ out.scalar = in.gcStats.globalsScan
},
},
"/gc/scan/heap:bytes": {
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
- out.scalar = gcController.heapScan.Load()
+ out.scalar = in.gcStats.heapScan
+ },
+ },
+ "/gc/scan/stack:bytes": {
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.gcStats.stackScan
},
},
"/gc/scan/total:bytes": {
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
- out.scalar = gcController.globalsScan.Load() + gcController.heapScan.Load() + gcController.lastStackScan.Load()
+ out.scalar = in.gcStats.totalScan
},
},
"/gc/heap/allocs-by-size:bytes": {
hist.counts[len(hist.counts)-1] = memstats.gcPauseDist.overflow.Load()
},
},
- "/gc/scan/stack:bytes": {
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = uint64(gcController.lastStackScan.Load())
- },
- },
"/gc/stack/starting-size:bytes": {
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
heapStatsDep statDep = iota // corresponds to heapStatsAggregate
sysStatsDep // corresponds to sysStatsAggregate
cpuStatsDep // corresponds to cpuStatsAggregate
+ gcStatsDep // corresponds to gcStatsAggregate
numStatsDeps
)
// a.cpuStats.accumulate(nanotime(), gcphase == _GCmark)
}
+// gcStatsAggregate represents various GC stats obtained from the runtime
+// acquired together to avoid skew and inconsistencies.
+type gcStatsAggregate struct {
+	heapScan uint64 // bytes of heap scanned (from gcController.heapScan)
+	stackScan uint64 // bytes of stack scanned (from gcController.lastStackScan)
+	globalsScan uint64 // bytes of globals scanned (from gcController.globalsScan)
+	totalScan uint64 // sum of the three fields above, computed in compute()
+}
+
+// compute populates the gcStatsAggregate with values from the runtime.
+//
+// All four values are read in one place so that totalScan is always the
+// exact sum of the other three, keeping the /gc/scan/total:bytes metric
+// consistent with the individual /gc/scan/* metrics.
+func (a *gcStatsAggregate) compute() {
+	a.heapScan = gcController.heapScan.Load()
+	// lastStackScan is presumably not uint64-typed — hence the widening
+	// conversion; confirm against gcController's declaration.
+	a.stackScan = uint64(gcController.lastStackScan.Load())
+	a.globalsScan = gcController.globalsScan.Load()
+	a.totalScan = a.heapScan + a.stackScan + a.globalsScan
+}
+
// nsToSec takes a duration in nanoseconds and converts it to seconds as
// a float64.
func nsToSec(ns int64) float64 {
heapStats heapStatsAggregate
sysStats sysStatsAggregate
cpuStats cpuStatsAggregate
+ gcStats gcStatsAggregate
}
// ensure populates statistics aggregates determined by deps if they
a.sysStats.compute()
case cpuStatsDep:
a.cpuStats.compute()
+ case gcStatsDep:
+ a.gcStats.compute()
}
}
a.ensured = a.ensured.union(missing)
numGC uint64
pauses uint64
}
+ var totalScan struct {
+ got, want uint64
+ }
var cpu struct {
gcAssist float64
gcDedicated float64
for i := range h.Counts {
gc.pauses += h.Counts[i]
}
+ case "/gc/scan/heap:bytes":
+ totalScan.want += samples[i].Value.Uint64()
+ case "/gc/scan/globals:bytes":
+ totalScan.want += samples[i].Value.Uint64()
+ case "/gc/scan/stack:bytes":
+ totalScan.want += samples[i].Value.Uint64()
+ case "/gc/scan/total:bytes":
+ totalScan.got = samples[i].Value.Uint64()
case "/sched/gomaxprocs:threads":
if got, want := samples[i].Value.Uint64(), uint64(runtime.GOMAXPROCS(-1)); got != want {
t.Errorf("gomaxprocs doesn't match runtime.GOMAXPROCS: got %d, want %d", got, want)
if gc.pauses < gc.numGC*2 {
t.Errorf("fewer pauses than expected: got %d, want at least %d", gc.pauses, gc.numGC*2)
}
+ if totalScan.got != totalScan.want {
+ t.Errorf("/gc/scan/total:bytes doesn't line up with sum of /gc/scan*: total %d vs. sum %d", totalScan.got, totalScan.want)
+ }
}
func BenchmarkReadMetricsLatency(b *testing.B) {