From 9204aca6c2a95277f2e3df4215c515ab38c008c8 Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Wed, 19 Mar 2025 11:46:41 -0400
Subject: [PATCH] [release-branch.go1.24] testing: separate b.Loop counter from b.N

Currently, b.Loop uses b.N as the iteration count target. However, since
it updates the target as it goes, the behavior is quite different from a
b.N-style benchmark. To avoid user confusion, this CL gives b.Loop a
separate, unexported iteration count target. It ensures b.N is 0 within
the b.Loop loop to help catch misuses, and commits the final iteration
count to b.N only once the loop is done (as the documentation states
"After Loop returns false, b.N contains the total number of iterations
that ran, so the benchmark may use b.N to compute other average
metrics.")

Since there are now two variables used by b.Loop, we put them in an
unnamed struct. Also, we rename b.loopN to b.loop.i because this
variable tracks the current iteration index (conventionally "i"), not
the target (conventionally "n").

Unfortunately, a simple renaming causes B.Loop to be too large for the
inliner. Thus, we make one simplification to B.Loop to keep it under the
threshold. We're about to lean into that simplification anyway in a
follow-up CL, so this is just temporary.

For #72974.

Change-Id: Ide1c4f1b9ca37f300f3beb0e60ba6202331b183e
Reviewed-on: https://go-review.googlesource.com/c/go/+/659655
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Junyang Shao
Auto-Submit: Austin Clements
Reviewed-on: https://go-review.googlesource.com/c/go/+/660556
Auto-Submit: Dmitri Shuralyov
---
 src/testing/benchmark.go | 54 ++++++++++++++++++++++++++--------------
 src/testing/loop_test.go | 12 +++++++++
 2 files changed, 47 insertions(+), 19 deletions(-)

diff --git a/src/testing/benchmark.go b/src/testing/benchmark.go
index 166e3a0d16..1cc891c7fc 100644
--- a/src/testing/benchmark.go
+++ b/src/testing/benchmark.go
@@ -114,10 +114,17 @@ type B struct {
 	netBytes uint64
 	// Extra metrics collected by ReportMetric.
 	extra map[string]float64
-	// For Loop() to be executed in benchFunc.
-	// Loop() has its own control logic that skips the loop scaling.
-	// See issue #61515.
-	loopN int
+
+	// loop tracks the state of B.Loop
+	loop struct {
+		// n is the target number of iterations. It gets bumped up as we go.
+		// When the benchmark loop is done, we commit this to b.N so users can
+		// do reporting based on it, but we avoid exposing it until then.
+		n int
+		// i is the current Loop iteration. It's strictly monotonically
+		// increasing toward n.
+		i int
+	}
 }
 
 // StartTimer starts timing a test. This function is called automatically
@@ -192,7 +199,8 @@ func (b *B) runN(n int) {
 	runtime.GC()
 	b.resetRaces()
 	b.N = n
-	b.loopN = 0
+	b.loop.n = 0
+	b.loop.i = 0
 	b.ctx = ctx
 	b.cancelCtx = cancelCtx
 
@@ -312,8 +320,8 @@ func (b *B) launch() {
 	}()
 
 	// b.Loop does its own ramp-up logic so we just need to run it once.
-	// If b.loopN is non zero, it means b.Loop has already run.
-	if b.loopN == 0 {
+	// If b.loop.n is non zero, it means b.Loop has already run.
+	if b.loop.n == 0 {
 		// Run the benchmark for at least the specified amount of time.
 		if b.benchTime.n > 0 {
 			// We already ran a single iteration in run1.
@@ -372,34 +380,40 @@ func (b *B) stopOrScaleBLoop() bool {
 	if t >= b.benchTime.d {
 		// Stop the timer so we don't count cleanup time
 		b.StopTimer()
+		// Commit iteration count
+		b.N = b.loop.n
 		return false
 	}
 	// Loop scaling
 	goalns := b.benchTime.d.Nanoseconds()
-	prevIters := int64(b.N)
-	b.N = predictN(goalns, prevIters, t.Nanoseconds(), prevIters)
-	b.loopN++
+	prevIters := int64(b.loop.n)
+	b.loop.n = predictN(goalns, prevIters, t.Nanoseconds(), prevIters)
+	b.loop.i++
 	return true
 }
 
 func (b *B) loopSlowPath() bool {
-	if b.loopN == 0 {
+	if b.loop.n == 0 {
 		// If it's the first call to b.Loop() in the benchmark function.
 		// Allows more precise measurement of benchmark loop cost counts.
-		// Also initialize b.N to 1 to kick start loop scaling.
-		b.N = 1
-		b.loopN = 1
+		// Also initialize target to 1 to kick start loop scaling.
+		b.loop.n = 1
+		// Within a b.Loop loop, we don't use b.N (to avoid confusion).
+		b.N = 0
+		b.loop.i++
 		b.ResetTimer()
 		return true
 	}
 	// Handles fixed iterations case
 	if b.benchTime.n > 0 {
-		if b.N < b.benchTime.n {
-			b.N = b.benchTime.n
-			b.loopN++
+		if b.loop.n < b.benchTime.n {
+			b.loop.n = b.benchTime.n
+			b.loop.i++
 			return true
 		}
 		b.StopTimer()
+		// Commit iteration count
+		b.N = b.loop.n
 		return false
 	}
 	// Handles fixed time case
@@ -440,8 +454,10 @@
 // whereas b.N-based benchmarks must run the benchmark function (and any
 // associated setup and cleanup) several times.
 func (b *B) Loop() bool {
-	if b.loopN != 0 && b.loopN < b.N {
-		b.loopN++
+	// On the first call, both i and n are 0, so we'll fall through to the slow
+	// path in that case, too.
+	if b.loop.i < b.loop.n {
+		b.loop.i++
 		return true
 	}
 	return b.loopSlowPath()
diff --git a/src/testing/loop_test.go b/src/testing/loop_test.go
index 781a8566e8..7a42919643 100644
--- a/src/testing/loop_test.go
+++ b/src/testing/loop_test.go
@@ -11,6 +11,8 @@ func TestBenchmarkBLoop(t *T) {
 	var runningEnd bool
 	runs := 0
 	iters := 0
+	firstBN := 0
+	restBN := 0
 	finalBN := 0
 	bRet := Benchmark(func(b *B) {
 		initialStart = b.start
@@ -18,6 +20,9 @@
 		for b.Loop() {
 			if iters == 0 {
 				firstStart = b.start
+				firstBN = b.N
+			} else {
+				restBN = max(restBN, b.N)
 			}
 			if iters == 1 {
 				scaledStart = b.start
@@ -39,6 +44,13 @@
 	if finalBN != iters || bRet.N != iters {
 		t.Errorf("benchmark iterations mismatch: %d loop iterations, final b.N=%d, bRet.N=%d", iters, finalBN, bRet.N)
 	}
+	// Verify that b.N was 0 inside the loop
+	if firstBN != 0 {
+		t.Errorf("want b.N == 0 on first iteration, got %d", firstBN)
+	}
+	if restBN != 0 {
+		t.Errorf("want b.N == 0 on subsequent iterations, got %d", restBN)
+	}
 	// Make sure the benchmark ran for an appropriate amount of time.
 	if bRet.T < benchTime.d {
 		t.Fatalf("benchmark ran for %s, want >= %s", bRet.T, benchTime.d)
-- 
2.48.1
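
For context, here is a minimal sketch (not part of the patch) of how a b.Loop-style benchmark interacts with the behavior enforced above: b.N stays 0 while the loop is running and only carries the final iteration count after Loop returns false. The package name, benchmark name, and workload below are illustrative.

// Illustrative sketch, not from the patch: the benchmark and its workload are
// hypothetical; only the b.Loop/b.N contract it exercises comes from the CL.
package example_test

import "testing"

func BenchmarkSum(b *testing.B) {
	data := make([]int64, 1024) // hypothetical workload
	for i := range data {
		data[i] = int64(i)
	}
	var total int64
	for b.Loop() {
		// While the loop runs, b.N stays 0 under this change, so the body
		// must not read it; Loop manages its own iteration target internally.
		for _, v := range data {
			total += v
		}
	}
	// After Loop returns false, b.N holds the total number of iterations that
	// ran, so it can feed derived averages, as the Loop documentation describes.
	b.ReportMetric(float64(total)/float64(b.N), "sum/op")
}

With -benchtime=Nx, the fixed-iteration branch of loopSlowPath supplies the target directly; otherwise stopOrScaleBLoop grows b.loop.n via predictN until the time target is met. Either way, the final count is committed to b.N just before Loop returns false, which is what the new firstBN/restBN checks in loop_test.go verify.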