Cypherpunks repositories - gostls13.git/commitdiff
sync: use RunParallel in benchmarks
author	Dmitriy Vyukov <dvyukov@google.com>
Tue, 25 Feb 2014 10:39:12 +0000 (14:39 +0400)
committer	Dmitriy Vyukov <dvyukov@google.com>
Tue, 25 Feb 2014 10:39:12 +0000 (14:39 +0400)
LGTM=bradfitz
R=golang-codereviews, bradfitz
CC=golang-codereviews
https://golang.org/cl/68050043
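
For context, the diffs below replace hand-rolled goroutine fan-out (GOMAXPROCS goroutines, an atomic iteration counter, and a done channel) with the testing.B.RunParallel helper. The snippet that follows is an illustrative sketch of that idiom, not code from this CL, and BenchmarkMutexParallel is a hypothetical name: RunParallel runs the body on GOMAXPROCS worker goroutines and distributes the b.N iterations among them through pb.Next, while b.SetParallelism(n) multiplies the worker count (taking over the role of the old GoroutineSlack constant).

package sync_test

import (
	"sync"
	"testing"
)

// Hypothetical example (not part of this CL) showing the RunParallel idiom.
func BenchmarkMutexParallel(b *testing.B) {
	var mu sync.Mutex
	b.RunParallel(func(pb *testing.PB) {
		// pb.Next reports whether more iterations remain; the b.N total
		// is shared among all worker goroutines started by RunParallel.
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
		}
	})
}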

src/pkg/sync/mutex_test.go
src/pkg/sync/once_test.go
src/pkg/sync/pool_test.go
src/pkg/sync/runtime_sema_test.go
src/pkg/sync/rwmutex_test.go
src/pkg/sync/waitgroup_test.go

src/pkg/sync/mutex_test.go
index bf78c6f609ca67c7e0ce42822521900dc089ac61..151b25c10fce9c9eacf076710145676a81c1f802 100644 (file)
@@ -9,7 +9,6 @@ package sync_test
 import (
        "runtime"
        . "sync"
-       "sync/atomic"
        "testing"
 )
 
@@ -90,63 +89,34 @@ func BenchmarkMutexUncontended(b *testing.B) {
                Mutex
                pad [128]uint8
        }
-       const CallsPerSched = 1000
-       procs := runtime.GOMAXPROCS(-1)
-       N := int32(b.N / CallsPerSched)
-       c := make(chan bool, procs)
-       for p := 0; p < procs; p++ {
-               go func() {
-                       var mu PaddedMutex
-                       for atomic.AddInt32(&N, -1) >= 0 {
-                               runtime.Gosched()
-                               for g := 0; g < CallsPerSched; g++ {
-                                       mu.Lock()
-                                       mu.Unlock()
-                               }
-                       }
-                       c <- true
-               }()
-       }
-       for p := 0; p < procs; p++ {
-               <-c
-       }
+       b.RunParallel(func(pb *testing.PB) {
+               var mu PaddedMutex
+               for pb.Next() {
+                       mu.Lock()
+                       mu.Unlock()
+               }
+       })
 }
 
 func benchmarkMutex(b *testing.B, slack, work bool) {
-       const (
-               CallsPerSched  = 1000
-               LocalWork      = 100
-               GoroutineSlack = 10
-       )
-       procs := runtime.GOMAXPROCS(-1)
+       var mu Mutex
        if slack {
-               procs *= GoroutineSlack
+               b.SetParallelism(10)
        }
-       N := int32(b.N / CallsPerSched)
-       c := make(chan bool, procs)
-       var mu Mutex
-       for p := 0; p < procs; p++ {
-               go func() {
-                       foo := 0
-                       for atomic.AddInt32(&N, -1) >= 0 {
-                               runtime.Gosched()
-                               for g := 0; g < CallsPerSched; g++ {
-                                       mu.Lock()
-                                       mu.Unlock()
-                                       if work {
-                                               for i := 0; i < LocalWork; i++ {
-                                                       foo *= 2
-                                                       foo /= 2
-                                               }
-                                       }
+       b.RunParallel(func(pb *testing.PB) {
+               foo := 0
+               for pb.Next() {
+                       mu.Lock()
+                       mu.Unlock()
+                       if work {
+                               for i := 0; i < 100; i++ {
+                                       foo *= 2
+                                       foo /= 2
                                }
                        }
-                       c <- foo == 42
-               }()
-       }
-       for p := 0; p < procs; p++ {
-               <-c
-       }
+               }
+               _ = foo
+       })
 }
 
 func BenchmarkMutex(b *testing.B) {
src/pkg/sync/once_test.go
index 183069a1a234dd9a8643b4d37fab25fe2c0ff10b..8afda82f3e18dc74f77209c4d652eaa9a49c4d48 100644 (file)
@@ -5,9 +5,7 @@
 package sync_test
 
 import (
-       "runtime"
        . "sync"
-       "sync/atomic"
        "testing"
 )
 
@@ -62,24 +60,11 @@ func TestOncePanic(t *testing.T) {
 }
 
 func BenchmarkOnce(b *testing.B) {
-       const CallsPerSched = 1000
-       procs := runtime.GOMAXPROCS(-1)
-       N := int32(b.N / CallsPerSched)
        var once Once
        f := func() {}
-       c := make(chan bool, procs)
-       for p := 0; p < procs; p++ {
-               go func() {
-                       for atomic.AddInt32(&N, -1) >= 0 {
-                               runtime.Gosched()
-                               for g := 0; g < CallsPerSched; g++ {
-                                       once.Do(f)
-                               }
-                       }
-                       c <- true
-               }()
-       }
-       for p := 0; p < procs; p++ {
-               <-c
-       }
+       b.RunParallel(func(pb *testing.PB) {
+               for pb.Next() {
+                       once.Do(f)
+               }
+       })
 }
src/pkg/sync/pool_test.go
index 7e02f69d6c399c06a629ed73582f6f3af6d9f77d..a34719ab2ce0a99f1e2f5ae429fd960f1604c637 100644 (file)
@@ -128,42 +128,24 @@ func TestPoolStress(t *testing.T) {
 
 func BenchmarkPool(b *testing.B) {
        var p Pool
-       var wg WaitGroup
-       n0 := uintptr(b.N)
-       n := n0
-       for i := 0; i < runtime.GOMAXPROCS(0); i++ {
-               wg.Add(1)
-               go func() {
-                       defer wg.Done()
-                       for atomic.AddUintptr(&n, ^uintptr(0)) < n0 {
-                               for b := 0; b < 100; b++ {
-                                       p.Put(1)
-                                       p.Get()
-                               }
-                       }
-               }()
-       }
-       wg.Wait()
+       b.RunParallel(func(pb *testing.PB) {
+               for pb.Next() {
+                       p.Put(1)
+                       p.Get()
+               }
+       })
 }
 
 func BenchmarkPoolOverlflow(b *testing.B) {
        var p Pool
-       var wg WaitGroup
-       n0 := uintptr(b.N)
-       n := n0
-       for i := 0; i < runtime.GOMAXPROCS(0); i++ {
-               wg.Add(1)
-               go func() {
-                       defer wg.Done()
-                       for atomic.AddUintptr(&n, ^uintptr(0)) < n0 {
-                               for b := 0; b < 100; b++ {
-                                       p.Put(1)
-                               }
-                               for b := 0; b < 100; b++ {
-                                       p.Get()
-                               }
+       b.RunParallel(func(pb *testing.PB) {
+               for pb.Next() {
+                       for b := 0; b < 100; b++ {
+                               p.Put(1)
                        }
-               }()
-       }
-       wg.Wait()
+                       for b := 0; b < 100; b++ {
+                               p.Get()
+                       }
+               }
+       })
 }
src/pkg/sync/runtime_sema_test.go
index 57a8dbee78398f7173384c49f849f17af3c6cb0b..5b7dd3df3f0ff58001c3fd17338f157c214043d9 100644 (file)
@@ -7,7 +7,6 @@ package sync_test
 import (
        "runtime"
        . "sync"
-       "sync/atomic"
        "testing"
 )
 
@@ -16,72 +15,44 @@ func BenchmarkSemaUncontended(b *testing.B) {
                sem uint32
                pad [32]uint32
        }
-       const CallsPerSched = 1000
-       procs := runtime.GOMAXPROCS(-1)
-       N := int32(b.N / CallsPerSched)
-       c := make(chan bool, procs)
-       for p := 0; p < procs; p++ {
-               go func() {
-                       sem := new(PaddedSem)
-                       for atomic.AddInt32(&N, -1) >= 0 {
-                               runtime.Gosched()
-                               for g := 0; g < CallsPerSched; g++ {
-                                       Runtime_Semrelease(&sem.sem)
-                                       Runtime_Semacquire(&sem.sem)
-                               }
-                       }
-                       c <- true
-               }()
-       }
-       for p := 0; p < procs; p++ {
-               <-c
-       }
+       b.RunParallel(func(pb *testing.PB) {
+               sem := new(PaddedSem)
+               for pb.Next() {
+                       Runtime_Semrelease(&sem.sem)
+                       Runtime_Semacquire(&sem.sem)
+               }
+       })
 }
 
 func benchmarkSema(b *testing.B, block, work bool) {
-       const CallsPerSched = 1000
-       const LocalWork = 100
-       procs := runtime.GOMAXPROCS(-1)
-       N := int32(b.N / CallsPerSched)
-       c := make(chan bool, procs)
-       c2 := make(chan bool, procs/2)
        sem := uint32(0)
        if block {
-               for p := 0; p < procs/2; p++ {
-                       go func() {
-                               Runtime_Semacquire(&sem)
-                               c2 <- true
-                       }()
-               }
-       }
-       for p := 0; p < procs; p++ {
+               done := make(chan bool)
                go func() {
-                       foo := 0
-                       for atomic.AddInt32(&N, -1) >= 0 {
-                               runtime.Gosched()
-                               for g := 0; g < CallsPerSched; g++ {
-                                       Runtime_Semrelease(&sem)
-                                       if work {
-                                               for i := 0; i < LocalWork; i++ {
-                                                       foo *= 2
-                                                       foo /= 2
-                                               }
-                                       }
-                                       Runtime_Semacquire(&sem)
-                               }
+                       for p := 0; p < runtime.GOMAXPROCS(0)/2; p++ {
+                               Runtime_Semacquire(&sem)
                        }
-                       c <- foo == 42
-                       Runtime_Semrelease(&sem)
+                       done <- true
+               }()
+               defer func() {
+                       <-done
                }()
        }
-       if block {
-               for p := 0; p < procs/2; p++ {
-                       <-c2
+       b.RunParallel(func(pb *testing.PB) {
+               foo := 0
+               for pb.Next() {
+                       Runtime_Semrelease(&sem)
+                       if work {
+                               for i := 0; i < 100; i++ {
+                                       foo *= 2
+                                       foo /= 2
+                               }
+                       }
+                       Runtime_Semacquire(&sem)
                }
-       }
-       for p := 0; p < procs; p++ {
-               <-c
-       }
+               _ = foo
+               Runtime_Semrelease(&sem)
+       })
 }
 
 func BenchmarkSemaSyntNonblock(b *testing.B) {
src/pkg/sync/rwmutex_test.go
index 39d5d6540de7853680e6f813983508436ea1fab6..0436f97239c7a16b823e6b05b4772bc134d064f0 100644 (file)
@@ -160,64 +160,39 @@ func BenchmarkRWMutexUncontended(b *testing.B) {
                RWMutex
                pad [32]uint32
        }
-       const CallsPerSched = 1000
-       procs := runtime.GOMAXPROCS(-1)
-       N := int32(b.N / CallsPerSched)
-       c := make(chan bool, procs)
-       for p := 0; p < procs; p++ {
-               go func() {
-                       var rwm PaddedRWMutex
-                       for atomic.AddInt32(&N, -1) >= 0 {
-                               runtime.Gosched()
-                               for g := 0; g < CallsPerSched; g++ {
-                                       rwm.RLock()
-                                       rwm.RLock()
-                                       rwm.RUnlock()
-                                       rwm.RUnlock()
-                                       rwm.Lock()
-                                       rwm.Unlock()
-                               }
-                       }
-                       c <- true
-               }()
-       }
-       for p := 0; p < procs; p++ {
-               <-c
-       }
+       b.RunParallel(func(pb *testing.PB) {
+               var rwm PaddedRWMutex
+               for pb.Next() {
+                       rwm.RLock()
+                       rwm.RLock()
+                       rwm.RUnlock()
+                       rwm.RUnlock()
+                       rwm.Lock()
+                       rwm.Unlock()
+               }
+       })
 }
 
 func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
-       const CallsPerSched = 1000
-       procs := runtime.GOMAXPROCS(-1)
-       N := int32(b.N / CallsPerSched)
-       c := make(chan bool, procs)
        var rwm RWMutex
-       for p := 0; p < procs; p++ {
-               go func() {
-                       foo := 0
-                       for atomic.AddInt32(&N, -1) >= 0 {
-                               runtime.Gosched()
-                               for g := 0; g < CallsPerSched; g++ {
-                                       foo++
-                                       if foo%writeRatio == 0 {
-                                               rwm.Lock()
-                                               rwm.Unlock()
-                                       } else {
-                                               rwm.RLock()
-                                               for i := 0; i != localWork; i += 1 {
-                                                       foo *= 2
-                                                       foo /= 2
-                                               }
-                                               rwm.RUnlock()
-                                       }
+       b.RunParallel(func(pb *testing.PB) {
+               foo := 0
+               for pb.Next() {
+                       foo++
+                       if foo%writeRatio == 0 {
+                               rwm.Lock()
+                               rwm.Unlock()
+                       } else {
+                               rwm.RLock()
+                               for i := 0; i != localWork; i += 1 {
+                                       foo *= 2
+                                       foo /= 2
                                }
+                               rwm.RUnlock()
                        }
-                       c <- foo == 42
-               }()
-       }
-       for p := 0; p < procs; p++ {
-               <-c
-       }
+               }
+               _ = foo
+       })
 }
 
 func BenchmarkRWMutexWrite100(b *testing.B) {
src/pkg/sync/waitgroup_test.go
index 84c4cfc37a34a43bf2dc13d40263efbae5ff7789..0cbd51056a7e17f89850bbf9794e8968347e7e72 100644 (file)
@@ -5,9 +5,7 @@
 package sync_test
 
 import (
-       "runtime"
        . "sync"
-       "sync/atomic"
        "testing"
 )
 
@@ -66,55 +64,30 @@ func BenchmarkWaitGroupUncontended(b *testing.B) {
                WaitGroup
                pad [128]uint8
        }
-       const CallsPerSched = 1000
-       procs := runtime.GOMAXPROCS(-1)
-       N := int32(b.N / CallsPerSched)
-       c := make(chan bool, procs)
-       for p := 0; p < procs; p++ {
-               go func() {
-                       var wg PaddedWaitGroup
-                       for atomic.AddInt32(&N, -1) >= 0 {
-                               runtime.Gosched()
-                               for g := 0; g < CallsPerSched; g++ {
-                                       wg.Add(1)
-                                       wg.Done()
-                                       wg.Wait()
-                               }
-                       }
-                       c <- true
-               }()
-       }
-       for p := 0; p < procs; p++ {
-               <-c
-       }
+       b.RunParallel(func(pb *testing.PB) {
+               var wg PaddedWaitGroup
+               for pb.Next() {
+                       wg.Add(1)
+                       wg.Done()
+                       wg.Wait()
+               }
+       })
 }
 
 func benchmarkWaitGroupAddDone(b *testing.B, localWork int) {
-       const CallsPerSched = 1000
-       procs := runtime.GOMAXPROCS(-1)
-       N := int32(b.N / CallsPerSched)
-       c := make(chan bool, procs)
        var wg WaitGroup
-       for p := 0; p < procs; p++ {
-               go func() {
-                       foo := 0
-                       for atomic.AddInt32(&N, -1) >= 0 {
-                               runtime.Gosched()
-                               for g := 0; g < CallsPerSched; g++ {
-                                       wg.Add(1)
-                                       for i := 0; i < localWork; i++ {
-                                               foo *= 2
-                                               foo /= 2
-                                       }
-                                       wg.Done()
-                               }
+       b.RunParallel(func(pb *testing.PB) {
+               foo := 0
+               for pb.Next() {
+                       wg.Add(1)
+                       for i := 0; i < localWork; i++ {
+                               foo *= 2
+                               foo /= 2
                        }
-                       c <- foo == 42
-               }()
-       }
-       for p := 0; p < procs; p++ {
-               <-c
-       }
+                       wg.Done()
+               }
+               _ = foo
+       })
 }
 
 func BenchmarkWaitGroupAddDone(b *testing.B) {
@@ -126,34 +99,18 @@ func BenchmarkWaitGroupAddDoneWork(b *testing.B) {
 }
 
 func benchmarkWaitGroupWait(b *testing.B, localWork int) {
-       const CallsPerSched = 1000
-       procs := runtime.GOMAXPROCS(-1)
-       N := int32(b.N / CallsPerSched)
-       c := make(chan bool, procs)
        var wg WaitGroup
-       wg.Add(procs)
-       for p := 0; p < procs; p++ {
-               go wg.Done()
-       }
-       for p := 0; p < procs; p++ {
-               go func() {
-                       foo := 0
-                       for atomic.AddInt32(&N, -1) >= 0 {
-                               runtime.Gosched()
-                               for g := 0; g < CallsPerSched; g++ {
-                                       wg.Wait()
-                                       for i := 0; i < localWork; i++ {
-                                               foo *= 2
-                                               foo /= 2
-                                       }
-                               }
+       b.RunParallel(func(pb *testing.PB) {
+               foo := 0
+               for pb.Next() {
+                       wg.Wait()
+                       for i := 0; i < localWork; i++ {
+                               foo *= 2
+                               foo /= 2
                        }
-                       c <- foo == 42
-               }()
-       }
-       for p := 0; p < procs; p++ {
-               <-c
-       }
+               }
+               _ = foo
+       })
 }
 
 func BenchmarkWaitGroupWait(b *testing.B) {