package sync_test
import (
"runtime"
. "sync"
- "sync/atomic"
"testing"
)
type PaddedMutex struct {
Mutex
pad [128]uint8
}
func BenchmarkMutexUncontended(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- var mu PaddedMutex
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- mu.Lock()
- mu.Unlock()
- }
- }
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
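+ // RunParallel runs the body on GOMAXPROCS goroutines and hands out b.N iterations via pb.Next, replacing the manual channel/atomic fan-out above.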
+ b.RunParallel(func(pb *testing.PB) {
+ var mu PaddedMutex
+ for pb.Next() {
+ mu.Lock()
+ mu.Unlock()
+ }
+ })
}
func benchmarkMutex(b *testing.B, slack, work bool) {
- const (
- CallsPerSched = 1000
- LocalWork = 100
- GoroutineSlack = 10
- )
- procs := runtime.GOMAXPROCS(-1)
+ var mu Mutex
if slack {
- procs *= GoroutineSlack
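+ // SetParallelism(10) makes RunParallel use 10 goroutines per GOMAXPROCS, reproducing the old GoroutineSlack oversubscription.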
+ b.SetParallelism(10)
}
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- var mu Mutex
- for p := 0; p < procs; p++ {
- go func() {
- foo := 0
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- mu.Lock()
- mu.Unlock()
- if work {
- for i := 0; i < LocalWork; i++ {
- foo *= 2
- foo /= 2
- }
- }
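+ // All workers now contend on the single shared mu; pb.Next replaces the CallsPerSched batching.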
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 0
+ for pb.Next() {
+ mu.Lock()
+ mu.Unlock()
+ if work {
+ for i := 0; i < 100; i++ {
+ foo *= 2
+ foo /= 2
}
}
- c <- foo == 42
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ }
+ _ = foo
+ })
}
func BenchmarkMutex(b *testing.B) {
benchmarkMutex(b, false, false)
}
package sync_test
import (
- "runtime"
. "sync"
- "sync/atomic"
"testing"
)
}
func BenchmarkOnce(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
var once Once
f := func() {}
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- once.Do(f)
- }
- }
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
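+ // Every goroutine hammers the shared Once; after the first Do runs f, only the fast path is measured.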
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ once.Do(f)
+ }
+ })
}
func BenchmarkPool(b *testing.B) {
var p Pool
- var wg WaitGroup
- n0 := uintptr(b.N)
- n := n0
- for i := 0; i < runtime.GOMAXPROCS(0); i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- for atomic.AddUintptr(&n, ^uintptr(0)) < n0 {
- for b := 0; b < 100; b++ {
- p.Put(1)
- p.Get()
- }
- }
- }()
- }
- wg.Wait()
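+ // Each iteration recycles one value through the shared Pool.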
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ p.Put(1)
+ p.Get()
+ }
+ })
}
func BenchmarkPoolOverflow(b *testing.B) {
var p Pool
- var wg WaitGroup
- n0 := uintptr(b.N)
- n := n0
- for i := 0; i < runtime.GOMAXPROCS(0); i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- for atomic.AddUintptr(&n, ^uintptr(0)) < n0 {
- for b := 0; b < 100; b++ {
- p.Put(1)
- }
- for b := 0; b < 100; b++ {
- p.Get()
- }
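+ // Putting 100 items before getting them back drives each iteration through the Pool's overflow path.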
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ for b := 0; b < 100; b++ {
+ p.Put(1)
}
- }()
- }
- wg.Wait()
+ for b := 0; b < 100; b++ {
+ p.Get()
+ }
+ }
+ })
}
package sync_test
import (
"runtime"
. "sync"
- "sync/atomic"
"testing"
)
type PaddedSem struct {
sem uint32
pad [32]uint32
}
func BenchmarkSemaUncontended(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- sem := new(PaddedSem)
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- Runtime_Semrelease(&sem.sem)
- Runtime_Semacquire(&sem.sem)
- }
- }
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
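+ // Each goroutine cycles its own padded semaphore, so acquires never block.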
+ b.RunParallel(func(pb *testing.PB) {
+ sem := new(PaddedSem)
+ for pb.Next() {
+ Runtime_Semrelease(&sem.sem)
+ Runtime_Semacquire(&sem.sem)
+ }
+ })
}
func benchmarkSema(b *testing.B, block, work bool) {
- const CallsPerSched = 1000
- const LocalWork = 100
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- c2 := make(chan bool, procs/2)
sem := uint32(0)
if block {
- for p := 0; p < procs/2; p++ {
- go func() {
- Runtime_Semacquire(&sem)
- c2 <- true
- }()
- }
- }
- for p := 0; p < procs; p++ {
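+ // Blocking variant: a background goroutine consumes GOMAXPROCS/2 releases, keeping a waiter parked on the semaphore while the benchmark runs.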
+ done := make(chan bool)
go func() {
- foo := 0
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- Runtime_Semrelease(&sem)
- if work {
- for i := 0; i < LocalWork; i++ {
- foo *= 2
- foo /= 2
- }
- }
- Runtime_Semacquire(&sem)
- }
+ for p := 0; p < runtime.GOMAXPROCS(0)/2; p++ {
+ Runtime_Semacquire(&sem)
}
- c <- foo == 42
- Runtime_Semrelease(&sem)
+ done <- true
+ }()
+ defer func() {
+ <-done
}()
}
- if block {
- for p := 0; p < procs/2; p++ {
- <-c2
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 0
+ for pb.Next() {
+ Runtime_Semrelease(&sem)
+ if work {
+ for i := 0; i < 100; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ }
+ Runtime_Semacquire(&sem)
}
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ _ = foo
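+ // A final release per worker lets the background acquirer drain and exit.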
+ Runtime_Semrelease(&sem)
+ })
}
func BenchmarkSemaSyntNonblock(b *testing.B) {
benchmarkSema(b, false, false)
}
type PaddedRWMutex struct {
RWMutex
pad [32]uint32
}
func BenchmarkRWMutexUncontended(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- var rwm PaddedRWMutex
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- rwm.RLock()
- rwm.RLock()
- rwm.RUnlock()
- rwm.RUnlock()
- rwm.Lock()
- rwm.Unlock()
- }
- }
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
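+ // Recursive read locks plus a write lock per iteration, each on a goroutine-local padded RWMutex.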
+ b.RunParallel(func(pb *testing.PB) {
+ var rwm PaddedRWMutex
+ for pb.Next() {
+ rwm.RLock()
+ rwm.RLock()
+ rwm.RUnlock()
+ rwm.RUnlock()
+ rwm.Lock()
+ rwm.Unlock()
+ }
+ })
}
func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
var rwm RWMutex
- for p := 0; p < procs; p++ {
- go func() {
- foo := 0
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- foo++
- if foo%writeRatio == 0 {
- rwm.Lock()
- rwm.Unlock()
- } else {
- rwm.RLock()
- for i := 0; i != localWork; i += 1 {
- foo *= 2
- foo /= 2
- }
- rwm.RUnlock()
- }
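+ // Take the write lock every writeRatio-th iteration; otherwise do localWork under the read lock.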
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 0
+ for pb.Next() {
+ foo++
+ if foo%writeRatio == 0 {
+ rwm.Lock()
+ rwm.Unlock()
+ } else {
+ rwm.RLock()
+ for i := 0; i < localWork; i++ {
+ foo *= 2
+ foo /= 2
}
+ rwm.RUnlock()
}
- c <- foo == 42
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ }
+ _ = foo
+ })
}
func BenchmarkRWMutexWrite100(b *testing.B) {
benchmarkRWMutex(b, 100, 100)
}
package sync_test
import (
- "runtime"
. "sync"
- "sync/atomic"
"testing"
)
type PaddedWaitGroup struct {
WaitGroup
pad [128]uint8
}
func BenchmarkWaitGroupUncontended(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- var wg PaddedWaitGroup
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- wg.Add(1)
- wg.Done()
- wg.Wait()
- }
- }
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
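+ // The counter returns to zero each iteration, so Wait never blocks.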
+ b.RunParallel(func(pb *testing.PB) {
+ var wg PaddedWaitGroup
+ for pb.Next() {
+ wg.Add(1)
+ wg.Done()
+ wg.Wait()
+ }
+ })
}
func benchmarkWaitGroupAddDone(b *testing.B, localWork int) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
var wg WaitGroup
- for p := 0; p < procs; p++ {
- go func() {
- foo := 0
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- wg.Add(1)
- for i := 0; i < localWork; i++ {
- foo *= 2
- foo /= 2
- }
- wg.Done()
- }
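+ // Add/Done pairs on the shared WaitGroup, with optional local work in between.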
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 0
+ for pb.Next() {
+ wg.Add(1)
+ for i := 0; i < localWork; i++ {
+ foo *= 2
+ foo /= 2
}
- c <- foo == 42
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ wg.Done()
+ }
+ _ = foo
+ })
}
func BenchmarkWaitGroupAddDone(b *testing.B) {
benchmarkWaitGroupAddDone(b, 0)
}
func benchmarkWaitGroupWait(b *testing.B, localWork int) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
var wg WaitGroup
- wg.Add(procs)
- for p := 0; p < procs; p++ {
- go wg.Done()
- }
- for p := 0; p < procs; p++ {
- go func() {
- foo := 0
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- wg.Wait()
- for i := 0; i < localWork; i++ {
- foo *= 2
- foo /= 2
- }
- }
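+ // The counter is always zero here, so each Wait takes the uncontended fast path.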
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 0
+ for pb.Next() {
+ wg.Wait()
+ for i := 0; i < localWork; i++ {
+ foo *= 2
+ foo /= 2
}
- c <- foo == 42
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ }
+ _ = foo
+ })
}
func BenchmarkWaitGroupWait(b *testing.B) {
benchmarkWaitGroupWait(b, 0)
}