From: Michael Anthony Knyszek
Date: Wed, 5 Aug 2020 23:10:46 +0000 (+0000)
Subject: runtime: add readMetrics latency benchmark
X-Git-Tag: go1.16beta1~513
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=74e566ed1dc52f7ef58093aff936a0931537a1ad;p=gostls13.git

runtime: add readMetrics latency benchmark

This change adds a new benchmark to the runtime tests for measuring the
latency of the new metrics implementation, based on the ReadMemStats
latency benchmark. readMetrics will have more metrics added to it in
the future, and this benchmark will serve as a way to measure the cost
of adding additional metrics.

Change-Id: Ib05e3ed4afa49a70863fc0c418eab35b72263e24
Reviewed-on: https://go-review.googlesource.com/c/go/+/247042
Run-TryBot: Michael Knyszek
TryBot-Result: Go Bot
Trust: Michael Knyszek
Reviewed-by: Emmanuel Odeke
Reviewed-by: Michael Pratt
---

diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go
index 9edebdada6..7870f31ae9 100644
--- a/src/runtime/gc_test.go
+++ b/src/runtime/gc_test.go
@@ -518,7 +518,7 @@ func BenchmarkReadMemStats(b *testing.B) {
 	hugeSink = nil
 }
 
-func BenchmarkReadMemStatsLatency(b *testing.B) {
+func applyGCLoad(b *testing.B) func() {
 	// We’ll apply load to the runtime with maxProcs-1 goroutines
 	// and use one more to actually benchmark. It doesn't make sense
 	// to try to run this test with only 1 P (that's what
@@ -563,6 +563,14 @@ func BenchmarkReadMemStatsLatency(b *testing.B) {
 			runtime.KeepAlive(hold)
 		}()
 	}
+	return func() {
+		close(done)
+		wg.Wait()
+	}
+}
+
+func BenchmarkReadMemStatsLatency(b *testing.B) {
+	stop := applyGCLoad(b)
 
 	// Spend this much time measuring latencies.
 	latencies := make([]time.Duration, 0, 1024)
@@ -579,12 +587,11 @@ func BenchmarkReadMemStatsLatency(b *testing.B) {
 		runtime.ReadMemStats(&ms)
 		latencies = append(latencies, time.Now().Sub(start))
 	}
-	close(done)
-	// Make sure to stop the timer before we wait! The goroutines above
-	// are very heavy-weight and not easy to stop, so we could end up
+	// Make sure to stop the timer before we wait! The load created above
+	// is very heavy-weight and not easy to stop, so we could end up
 	// confusing the benchmarking framework for small b.N.
 	b.StopTimer()
-	wg.Wait()
+	stop()
 
 	// Disable the default */op metrics.
 	// ns/op doesn't mean anything because it's an average, but we
diff --git a/src/runtime/metrics_test.go b/src/runtime/metrics_test.go
index f00aad07c4..d925b057b0 100644
--- a/src/runtime/metrics_test.go
+++ b/src/runtime/metrics_test.go
@@ -7,8 +7,10 @@ package runtime_test
 import (
 	"runtime"
 	"runtime/metrics"
+	"sort"
 	"strings"
 	"testing"
+	"time"
 	"unsafe"
 )
 
@@ -112,3 +114,39 @@ func TestReadMetricsConsistency(t *testing.T) {
 		t.Errorf(`"/memory/classes/total:bytes" does not match sum of /memory/classes/**: got %d, want %d`, totalVirtual.got, totalVirtual.want)
 	}
 }
+
+func BenchmarkReadMetricsLatency(b *testing.B) {
+	stop := applyGCLoad(b)
+
+	// Spend this much time measuring latencies.
+	latencies := make([]time.Duration, 0, 1024)
+	_, samples := prepareAllMetricsSamples()
+
+	// Hit metrics.Read continuously and measure.
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		start := time.Now()
+		metrics.Read(samples)
+		latencies = append(latencies, time.Now().Sub(start))
+	}
+	// Make sure to stop the timer before we wait! The load created above
+	// is very heavy-weight and not easy to stop, so we could end up
+	// confusing the benchmarking framework for small b.N.
+	b.StopTimer()
+	stop()
+
+	// Disable the default */op metrics.
+	// ns/op doesn't mean anything because it's an average, but we
+	// have a sleep in our b.N loop above which skews this significantly.
+	b.ReportMetric(0, "ns/op")
+	b.ReportMetric(0, "B/op")
+	b.ReportMetric(0, "allocs/op")
+
+	// Sort latencies then report percentiles.
+	sort.Slice(latencies, func(i, j int) bool {
+		return latencies[i] < latencies[j]
+	})
+	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
+	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
+	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
+}
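
Note: prepareAllMetricsSamples is a test-only helper defined elsewhere in
metrics_test.go; it builds one metrics.Sample per supported metric. Outside
the runtime tests, an equivalent sample set can be assembled with the public
runtime/metrics API. The following is a minimal, illustrative sketch, not
part of this patch (it assumes Go 1.16 or later, where runtime/metrics first
shipped):

	package main

	import (
		"fmt"
		"runtime/metrics"
	)

	func main() {
		// Build one Sample per supported metric, mirroring what the
		// test's prepareAllMetricsSamples helper does internally.
		descs := metrics.All()
		samples := make([]metrics.Sample, len(descs))
		for i := range descs {
			samples[i].Name = descs[i].Name
		}

		// A single Read populates every sample; this is the call whose
		// latency the new benchmark measures under GC load.
		metrics.Read(samples)

		for _, s := range samples {
			fmt.Println(s.Name, s.Value.Kind())
		}
	}

The benchmark itself can be run against the standard library with the usual
go test invocation, e.g. go test -run='^$' -bench=ReadMetricsLatency runtime.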