sync: add Pool benchmarks to stress STW and reuse
author Austin Clements <austin@google.com>
Fri, 8 Mar 2019 20:01:34 +0000 (15:01 -0500)
committer Austin Clements <austin@google.com>
Fri, 5 Apr 2019 18:49:06 +0000 (18:49 +0000)
This adds two benchmarks that will highlight two problems in Pool that
we're about to address.

The first benchmark measures the impact of large Pools on GC STW time.
Currently, STW time is O(# of items in Pools), and this benchmark
demonstrates 70µs STW times.
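
For context (this sketch is mine, not part of the CL): on the
runtimes this change targets, GC clears every item in every Pool
while the world is stopped, so pause time grows with the total
number of pooled items. A minimal standalone program showing the
effect the benchmark measures, assuming a pre-Go 1.13 runtime:

	package main

	import (
		"fmt"
		"runtime"
		"runtime/debug"
		"sync"
	)

	func main() {
		// Disable automatic GC so pauses come only from the
		// explicit runtime.GC() calls, as the benchmark does.
		defer debug.SetGCPercent(debug.SetGCPercent(-1))

		var p sync.Pool
		var ms runtime.MemStats
		for _, n := range []int{1000, 10000, 100000} {
			var item interface{} = 42
			for i := 0; i < n; i++ {
				p.Put(item)
			}
			runtime.GC()
			runtime.ReadMemStats(&ms)
			// Most recent pause: PauseNs[(NumGC+255)%256].
			fmt.Printf("items=%6d pause=%dns\n",
				n, ms.PauseNs[(ms.NumGC+255)%256])
		}
	}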

The second benchmark measures the impact of fully clearing all Pools
on each GC. Typically this is a problem in heavily-loaded systems
because it causes a spike in allocation. This benchmark stresses this
by simulating an expensive "New" function, so the cost of creating new
objects is reflected in the ns/op of the benchmark.
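
An illustration (mine, not part of the CL) of the failure mode the
benchmark targets, assuming a runtime where each GC fully clears
all Pools:

	package main

	import (
		"fmt"
		"runtime"
		"sync"
		"time"
	)

	func main() {
		var nNew int
		p := sync.Pool{New: func() interface{} {
			nNew++
			time.Sleep(time.Millisecond) // stand-in for expensive work
			return 42
		}}

		v := p.Get() // pool empty: New runs
		p.Put(v)
		v = p.Get() // reused: New does not run
		p.Put(v)

		runtime.GC() // pre-Go 1.13: drops everything in the pool
		v = p.Get()  // empty again: New runs, the allocation spike
		_ = v
		fmt.Println("New calls:", nNew) // 3 on such runtimes
	}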

For #22950, #22331.

Change-Id: I0c8853190d23144026fa11837b6bf42adc461722
Reviewed-on: https://go-review.googlesource.com/c/go/+/166959
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
src/sync/pool_test.go

index 62085b5c966050fad28a2a1013167db4282a818f..5649a9dc83712876a02bb9f1e13bc4d2defdd1cc 100644
@@ -10,6 +10,7 @@ package sync_test
 import (
        "runtime"
        "runtime/debug"
+       "sort"
        . "sync"
        "sync/atomic"
        "testing"
@@ -253,3 +254,84 @@ func BenchmarkPoolOverflow(b *testing.B) {
                }
        })
 }
+
+var globalSink interface{}
+
+func BenchmarkPoolSTW(b *testing.B) {
+       // Take control of GC.
+       defer debug.SetGCPercent(debug.SetGCPercent(-1))
+
+       var mstats runtime.MemStats
+       var pauses []uint64
+
+       var p Pool
+       for i := 0; i < b.N; i++ {
+               // Put a large number of items into a pool.
+               const N = 100000
+               var item interface{} = 42
+               for i := 0; i < N; i++ {
+                       p.Put(item)
+               }
+               // Do a GC.
+               runtime.GC()
+               // Record pause time.
+               runtime.ReadMemStats(&mstats)
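+               // PauseNs is a circular buffer of recent GC pause
+               // times; the most recent pause is at index
+               // (NumGC+255)%256.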
+               pauses = append(pauses, mstats.PauseNs[(mstats.NumGC+255)%256])
+       }
+
+       // Get pause time stats.
+       sort.Slice(pauses, func(i, j int) bool { return pauses[i] < pauses[j] })
+       var total uint64
+       for _, ns := range pauses {
+               total += ns
+       }
+       // ns/op for this benchmark is average STW time.
+       b.ReportMetric(float64(total)/float64(b.N), "ns/op")
+       b.ReportMetric(float64(pauses[len(pauses)*95/100]), "p95-ns/STW")
+       b.ReportMetric(float64(pauses[len(pauses)*50/100]), "p50-ns/STW")
+}
+
+func BenchmarkPoolExpensiveNew(b *testing.B) {
+       // Populate a pool with items that are expensive to construct
+       // to stress pool cleanup and subsequent reconstruction.
+
+       // Create a ballast so the GC has a non-zero heap size and
+       // runs at reasonable times.
+       globalSink = make([]byte, 8<<20)
+       defer func() { globalSink = nil }()
+
+       // Create a pool that's "expensive" to fill.
+       var p Pool
+       var nNew uint64
+       p.New = func() interface{} {
+               atomic.AddUint64(&nNew, 1)
+               time.Sleep(time.Millisecond)
+               return 42
+       }
+       var mstats1, mstats2 runtime.MemStats
+       runtime.ReadMemStats(&mstats1)
+       b.RunParallel(func(pb *testing.PB) {
+               // Simulate 100X the number of goroutines having items
+               // checked out from the Pool simultaneously.
+               items := make([]interface{}, 100)
+               var sink []byte
+               for pb.Next() {
+                       // Stress the pool.
+                       for i := range items {
+                               items[i] = p.Get()
+                               // Simulate doing some work with this
+                               // item checked out.
+                               sink = make([]byte, 32<<10)
+                       }
+                       for i, v := range items {
+                               p.Put(v)
+                               items[i] = nil
+                       }
+               }
+               _ = sink
+       })
+       runtime.ReadMemStats(&mstats2)
+
+       b.ReportMetric(float64(mstats2.NumGC-mstats1.NumGC)/float64(b.N), "GCs/op")
+       b.ReportMetric(float64(nNew)/float64(b.N), "New/op")
+}
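
To try these locally (invocation is an example, not from the CL):

	go test -run=NONE -bench='BenchmarkPool(STW|ExpensiveNew)' sync

BenchmarkPoolSTW overrides ns/op with the mean STW pause and also
reports p50-ns/STW and p95-ns/STW percentiles; BenchmarkPoolExpensiveNew
reports GCs/op and New/op alongside the usual ns/op.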