From ac6dea7aa1ccc3fa2b07be05ba71714e686f928a Mon Sep 17 00:00:00 2001
From: Rhys Hiltner
Date: Wed, 22 May 2024 11:53:36 -0700
Subject: [PATCH] runtime: lower mutex contention test expectations

As of https://go.dev/cl/586796, the runtime/metrics view of internal
mutex contention is sampled at 1 per gTrackingPeriod, rather than
either 1 (immediately prior to CL 586796) or the more frequent of
gTrackingPeriod or the mutex profiling rate (Go 1.22). Thus, we no
longer have a real lower bound on the amount of contention that
runtime/metrics will report. Relax the test's expectations again.

For #64253

Change-Id: I94e1d92348a03599a819ec8ac785a0eb3c1ddd73
Reviewed-on: https://go-review.googlesource.com/c/go/+/587515
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Carlos Amedee
Reviewed-by: Michael Knyszek
Auto-Submit: Rhys Hiltner
---
 src/runtime/metrics_test.go | 29 +++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/src/runtime/metrics_test.go b/src/runtime/metrics_test.go
index f4b87419b2..a86efed0e6 100644
--- a/src/runtime/metrics_test.go
+++ b/src/runtime/metrics_test.go
@@ -1036,6 +1036,13 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
 		if metricGrowth == 0 && strictTiming {
 			// If the critical section is very short, systems with low timer
 			// resolution may be unable to measure it via nanotime.
+			//
+			// This is sampled at 1 per gTrackingPeriod, but the explicit
+			// runtime.mutex tests create 200 contention events. Observing
+			// zero of those has a probability of (7/8)^200 = 2.5e-12 which
+			// is acceptably low (though the calculation has a tenuous
+			// dependency on cheaprandn being a good-enough source of
+			// entropy).
 			t.Errorf("no increase in /sync/mutex/wait/total:seconds metric")
 		}
 		// This comparison is possible because the time measurements in support of
@@ -1111,7 +1118,7 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
 	name := t.Name()
 
 	t.Run("runtime.lock", func(t *testing.T) {
-		mus := make([]runtime.Mutex, 100)
+		mus := make([]runtime.Mutex, 200)
 		var needContention atomic.Int64
 		baseDelay := 100 * time.Microsecond // large relative to system noise, for comparison between clocks
 		fastDelayMicros := baseDelay.Microseconds()
@@ -1207,13 +1214,19 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
 			needContention.Store(int64(len(mus) - 1))
 			metricGrowth, profileGrowth, n, _ := testcase(true, stks, workers, fn)(t)
 
-			if have, want := metricGrowth, baseDelay.Seconds()*float64(len(mus)); have < want {
-				// The test imposes a delay with usleep, verified with calls to
-				// nanotime. Compare against the runtime/metrics package's view
-				// (based on nanotime) rather than runtime/pprof's view (based
-				// on cputicks).
-				t.Errorf("runtime/metrics reported less than the known minimum contention duration (%fs < %fs)", have, want)
-			}
+			t.Run("metric", func(t *testing.T) {
+				// The runtime/metrics view is sampled at 1 per gTrackingPeriod,
+				// so we don't have a hard lower bound here.
+				testenv.SkipFlaky(t, 64253)
+
+				if have, want := metricGrowth, baseDelay.Seconds()*float64(len(mus)); have < want {
+					// The test imposes a delay with usleep, verified with calls to
+					// nanotime. Compare against the runtime/metrics package's view
+					// (based on nanotime) rather than runtime/pprof's view (based
+					// on cputicks).
+					t.Errorf("runtime/metrics reported less than the known minimum contention duration (%fs < %fs)", have, want)
+				}
+			})
 			if have, want := n, int64(len(mus)); have != want {
 				t.Errorf("mutex profile reported contention count different from the known true count (%d != %d)", have, want)
 			}
-- 
2.48.1
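
A quick sanity check of the (7/8)^200 = 2.5e-12 figure cited in the new
comment. This is a minimal standalone sketch, not part of the patch; it
assumes each of the 200 contention events is sampled independently at a
rate of 1 per gTrackingPeriod, with gTrackingPeriod = 8 (as the comment's
7/8 figure implies):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		// Each contention event is assumed to be sampled independently
		// with probability 1/8 (1 per gTrackingPeriod), so the chance
		// that all 200 events go unsampled is (7/8)^200.
		const events = 200
		pZero := math.Pow(7.0/8.0, events)
		fmt.Printf("(7/8)^%d = %.2g\n", events, pZero) // prints 2.5e-12
	}

The result matches the 2.5e-12 in the added comment; as that comment
notes, the calculation also leans on cheaprandn behaving like a
good-enough source of entropy for the independence assumption to hold.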