})
}
}
+
+func BenchmarkErrOK(b *testing.B) {
+ ctx, cancel := WithCancel(Background())
+ defer cancel()
+ for b.Loop() {
+ if err := ctx.Err(); err != nil {
+ b.Fatalf("ctx.Err() = %v, want nil", err)
+ }
+ }
+}
+
+func BenchmarkErrCanceled(b *testing.B) {
+ ctx, cancel := WithCancel(Background())
+ cancel()
+ for b.Loop() {
+ if err := ctx.Err(); err != Canceled {
+ b.Fatalf("ctx.Err() = %v, want %v", err, Canceled)
+ }
+ }
+}
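+
+// Run with e.g. go test -run=NONE -bench='Err(OK|Canceled)$' from src/context;
+// testing.B.Loop requires a Go 1.24+ toolchain.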
mu sync.Mutex // protects following fields
done atomic.Value // of chan struct{}, created lazily, closed by first cancel call
children map[canceler]struct{} // set to nil by the first cancel call
- err error // set to non-nil by the first cancel call
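+ // err is stored at most once, under mu, by the first cancel call and is
+ // read lock-free by Err via an atomic load.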
+ err atomic.Value // of error; set to non-nil by the first cancel call
cause error // set to non-nil by the first cancel call
}
}
func (c *cancelCtx) Err() error {
- c.mu.Lock()
- err := c.err
- c.mu.Unlock()
- return err
+ // An atomic load is ~5x faster than a mutex lock/unlock, which matters for
+ // callers that poll Err in tight loops.
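+ // Load returns nil until the first cancel call Stores an error; the stored
+ // value is always a non-nil error, so the type assertion below is safe.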
+ if err := c.err.Load(); err != nil {
+ return err.(error)
+ }
+ return nil
}
// propagateCancel arranges for child to be canceled when parent is.
if p, ok := parentCancelCtx(parent); ok {
// parent is a *cancelCtx, or derives from one.
p.mu.Lock()
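+ // The lock is still needed here: checking err and registering the child must
+ // be atomic with respect to cancel, or a concurrently canceled parent could
+ // miss the child.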
- if p.err != nil {
+ if err := p.err.Load(); err != nil {
// parent has already been canceled
- child.cancel(false, p.err, p.cause)
+ child.cancel(false, err.(error), p.cause)
} else {
if p.children == nil {
p.children = make(map[canceler]struct{})
cause = err
}
c.mu.Lock()
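+ // Both the Load check and the Store below stay under c.mu so that only the
+ // first cancel call sets err and cause.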
- if c.err != nil {
+ if c.err.Load() != nil {
c.mu.Unlock()
return // already canceled
}
- c.err = err
+ c.err.Store(err)
c.cause = cause
d, _ := c.done.Load().(chan struct{})
if d == nil {
}
c.mu.Lock()
defer c.mu.Unlock()
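+ // Only arm the timer if the context wasn't already canceled while being set
+ // up (for example, by an already-canceled parent).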
- if c.err == nil {
+ if c.err.Load() == nil {
c.timer = time.AfterFunc(dur, func() {
c.cancel(true, DeadlineExceeded, cause)
})