Replace fmt's channel-based free-object cache with a mutex-guarded slice.
Simpler concept, and it turns a queue into a stack.
Speeds up the benchmarks noticeably.
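
To make the queue-to-stack point concrete outside the diff, here is a minimal,
self-contained sketch of the two schemes (chanCache and sliceCache are
illustrative names, not identifiers from this CL). The buffered channel hands
objects back first-in-first-out; the slice pops the most recently freed object.
One plausible reason for the speedup is that the slice avoids the channel's
scheduler and synchronization machinery, and the newest object is likeliest to
still be hot in the CPU cache.

package main

import (
	"fmt"
	"sync"
)

// chanCache: the old scheme. A buffered channel drains in FIFO order,
// so the free list behaves like a queue.
type chanCache struct {
	saved chan interface{}
}

func (c *chanCache) put(x interface{}) {
	select {
	case c.saved <- x: // saved in cache
	default: // cache full: discard
	}
}

func (c *chanCache) get() interface{} {
	select {
	case x := <-c.saved:
		return x
	default:
		return nil // a real cache would allocate a fresh object here
	}
}

// sliceCache: the new scheme. A mutex-guarded slice used as a stack,
// so get returns the most recently put object (LIFO).
type sliceCache struct {
	mu    sync.Mutex
	saved []interface{}
}

func (c *sliceCache) put(x interface{}) {
	c.mu.Lock()
	if len(c.saved) < cap(c.saved) {
		c.saved = append(c.saved, x)
	}
	c.mu.Unlock()
}

func (c *sliceCache) get() interface{} {
	c.mu.Lock()
	defer c.mu.Unlock()
	if n := len(c.saved); n > 0 {
		x := c.saved[n-1]
		c.saved = c.saved[:n-1]
		return x
	}
	return nil // a real cache would allocate a fresh object here
}

func main() {
	q := &chanCache{saved: make(chan interface{}, 2)}
	s := &sliceCache{saved: make([]interface{}, 0, 2)}
	for _, v := range []int{1, 2} {
		q.put(v)
		s.put(v)
	}
	// Prints "1 2": the queue hands back the oldest object, the stack the newest.
	fmt.Println(q.get(), s.get())
}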
Before:
fmt_test.BenchmarkSprintfEmpty        10000000       282 ns/op
fmt_test.BenchmarkSprintfString        2000000       910 ns/op
fmt_test.BenchmarkSprintfInt           5000000       723 ns/op
fmt_test.BenchmarkSprintfIntInt        1000000      1071 ns/op
fmt_test.BenchmarkSprintfPrefixedInt   1000000      1108 ns/op
fmt_test.BenchmarkScanInts                1000   2239510 ns/op
fmt_test.BenchmarkScanRecursiveInt        1000   2365432 ns/op
After:
fmt_test.BenchmarkSprintfEmpty        10000000       232 ns/op
fmt_test.BenchmarkSprintfString        2000000       837 ns/op
fmt_test.BenchmarkSprintfInt           5000000       590 ns/op
fmt_test.BenchmarkSprintfIntInt        2000000       910 ns/op
fmt_test.BenchmarkSprintfPrefixedInt   2000000       996 ns/op
fmt_test.BenchmarkScanInts                1000   2210715 ns/op
fmt_test.BenchmarkScanRecursiveInt        1000   2367800 ns/op
R=rsc, r
CC=golang-dev
https://golang.org/cl/5151044
"io"
"os"
"reflect"
+ "sync"
"unicode"
"utf8"
)
}
// A cache holds a set of reusable objects.
-// The buffered channel holds the currently available objects.
+// The slice is a stack (LIFO).
// If more are needed, the cache creates them by calling new.
type cache struct {
-	saved chan interface{}
+	mu    sync.Mutex
+	saved []interface{}
	new   func() interface{}
}
func (c *cache) put(x interface{}) {
-	select {
-	case c.saved <- x:
-		// saved in cache
-	default:
-		// discard
+	c.mu.Lock()
+	if len(c.saved) < cap(c.saved) {
+		// Push onto the stack; if the cache is full, x is simply discarded.
+		c.saved = append(c.saved, x)
	}
+	c.mu.Unlock()
}
func (c *cache) get() interface{} {
-	select {
-	case x := <-c.saved:
-		return x // reused from cache
-	default:
+	c.mu.Lock()
+	n := len(c.saved)
+	if n == 0 {
+		// Nothing cached: unlock before falling back to allocation.
+		c.mu.Unlock()
		return c.new()
	}
-	panic("not reached")
+	// Pop the most recently saved object (LIFO).
+	x := c.saved[n-1]
+	c.saved = c.saved[0 : n-1]
+	c.mu.Unlock()
+	return x
}
func newCache(f func() interface{}) *cache {
-	return &cache{make(chan interface{}, 100), f}
+	return &cache{saved: make([]interface{}, 0, 100), new: f}
}
var ppFree = newCache(func() interface{} { return new(pp) })
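
For context, a sketch of how a free list like ppFree is consumed. The
newPrinter/free pairing mirrors how fmt recycles its pp printer structs, but
the bodies here are illustrative, not part of this change.

// Illustrative only: the get/put round trip a caller performs.
func newPrinter() *pp {
	p := ppFree.get().(*pp) // reuse a cached *pp, or new(pp) if the stack is empty
	// ... reset p's fields before use ...
	return p
}

func (p *pp) free() {
	// ... drop any oversized buffers so cached objects stay small ...
	ppFree.put(p) // push p back; silently discarded if the cache is full
}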