Cypherpunks repositories - gostls13.git/commitdiff
runtime: add a benchmark of Callers
author Austin Clements <austin@google.com>
Tue, 28 Feb 2023 14:13:56 +0000 (09:13 -0500)
committer Austin Clements <austin@google.com>
Fri, 10 Mar 2023 17:18:20 +0000 (17:18 +0000)
We're about to make major changes to tracebacks. We have benchmarks of
stack copying, but not of PC buffer filling, so add some that we can
track through these changes.

For #54466.

Change-Id: I3ed61d75144ba03b61517cd9834eeb71c99d74df
Reviewed-on: https://go-review.googlesource.com/c/go/+/472956
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
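
For context, a minimal illustrative sketch (not part of this change) of the PC-buffer filling that these benchmarks measure: runtime.Callers writes return PCs into a caller-supplied []uintptr, and runtime.CallersFrames expands those PCs into symbolic frames, including frames for inlined calls.

// Illustrative use of the API exercised by the new benchmarks; not part of the CL.
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Fill pcs with return PCs from the calling goroutine's stack.
	pcs := make([]uintptr, 32)
	n := runtime.Callers(0, pcs)

	// Expand the raw PCs into function/file/line information.
	frames := runtime.CallersFrames(pcs[:n])
	for {
		frame, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
		if !more {
			break
		}
	}
}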
src/runtime/callers_test.go

index d245cbd2d2f1cc0193523f71780015d28e0e0722..d738076a2fecadba71baf819c4c1a8fd15400eb9 100644 (file)
@@ -339,3 +339,94 @@ func callerLine(t *testing.T, skip int) int {
        }
        return line
 }
+
+func BenchmarkCallers(b *testing.B) {
+       b.Run("cached", func(b *testing.B) {
+               // Very pcvalueCache-friendly, no inlining.
+               callersCached(b, 100)
+       })
+       b.Run("inlined", func(b *testing.B) {
+               // Some inlining, still pretty cache-friendly.
+               callersInlined(b, 100)
+       })
+       b.Run("no-cache", func(b *testing.B) {
+               // Cache-hostile
+               callersNoCache(b, 100)
+       })
+}
+
+func callersCached(b *testing.B, n int) int {
+       if n <= 0 {
+               pcs := make([]uintptr, 32)
+               b.ResetTimer()
+               for i := 0; i < b.N; i++ {
+                       runtime.Callers(0, pcs)
+               }
+               b.StopTimer()
+               return 0
+       }
+       return 1 + callersCached(b, n-1)
+}
+
+func callersInlined(b *testing.B, n int) int {
+       if n <= 0 {
+               pcs := make([]uintptr, 32)
+               b.ResetTimer()
+               for i := 0; i < b.N; i++ {
+                       runtime.Callers(0, pcs)
+               }
+               b.StopTimer()
+               return 0
+       }
+       return 1 + callersInlined1(b, n-1)
+}
+func callersInlined1(b *testing.B, n int) int { return callersInlined2(b, n) }
+func callersInlined2(b *testing.B, n int) int { return callersInlined3(b, n) }
+func callersInlined3(b *testing.B, n int) int { return callersInlined4(b, n) }
+func callersInlined4(b *testing.B, n int) int { return callersInlined(b, n) }
+
+func callersNoCache(b *testing.B, n int) int {
+       if n <= 0 {
+               pcs := make([]uintptr, 32)
+               b.ResetTimer()
+               for i := 0; i < b.N; i++ {
+                       runtime.Callers(0, pcs)
+               }
+               b.StopTimer()
+               return 0
+       }
+       switch n % 16 {
+       case 0:
+               return 1 + callersNoCache(b, n-1)
+       case 1:
+               return 1 + callersNoCache(b, n-1)
+       case 2:
+               return 1 + callersNoCache(b, n-1)
+       case 3:
+               return 1 + callersNoCache(b, n-1)
+       case 4:
+               return 1 + callersNoCache(b, n-1)
+       case 5:
+               return 1 + callersNoCache(b, n-1)
+       case 6:
+               return 1 + callersNoCache(b, n-1)
+       case 7:
+               return 1 + callersNoCache(b, n-1)
+       case 8:
+               return 1 + callersNoCache(b, n-1)
+       case 9:
+               return 1 + callersNoCache(b, n-1)
+       case 10:
+               return 1 + callersNoCache(b, n-1)
+       case 11:
+               return 1 + callersNoCache(b, n-1)
+       case 12:
+               return 1 + callersNoCache(b, n-1)
+       case 13:
+               return 1 + callersNoCache(b, n-1)
+       case 14:
+               return 1 + callersNoCache(b, n-1)
+       default:
+               return 1 + callersNoCache(b, n-1)
+       }
+}
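
Usage note (not part of the change): the new benchmarks can be run from a Go checkout with something like go test -run='^$' -bench=Callers runtime, and results before and after a traceback change can be compared with benchstat (golang.org/x/perf/cmd/benchstat). The three variants differ only in call-stack shape: callersCached recurses through a single call site, callersInlined recurses through four small inlinable wrapper functions, and callersNoCache returns through one of 16 distinct call sites chosen by n%16, so successive frames resolve different PCs and defeat the pcvalue cache.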