The unbounded list-based defer pool can grow without bound.
This can happen if a goroutine routinely allocates a defer
on one P, then blocks, and is later unblocked, scheduled, and
frees the defer on another P.
The scenario was reported on golang-nuts list.
We've been here several times before: any unbounded local
cache is bad, because it can grow without limit. This change
introduces a central defer pool; the per-P pools become
fixed-size, and serve only to amortize accesses to the
central pool.
Freedefer now executes on system stack to not consume
nosplit stack space.
Change-Id: I1a27695838409259d1586a0adfa9f92bccf7ceba
Reviewed-on: https://go-review.googlesource.com/3967
Reviewed-by: Keith Randall <khr@golang.org>
Run-TryBot: Dmitry Vyukov <dvyukov@google.com>
sched.sudogcache = nil
unlock(&sched.sudoglock)
+ // Clear central defer pools.
+ // Leave per-P pools alone, they have strictly bounded size.
+ lock(&sched.deferlock)
+ for i := range sched.deferpool {
+ // disconnect cached list before dropping it on the floor,
+ // so that a dangling ref to one entry does not pin all of them.
+ var d, dlink *_defer
+ for d = sched.deferpool[i]; d != nil; d = dlink {
+ dlink = d.link
+ d.link = nil
+ }
+ sched.deferpool[i] = nil
+ }
+ unlock(&sched.deferlock)
+
for _, p := range &allp {
if p == nil {
break
c.tiny = nil
c.tinyoffset = 0
}
-
- // clear defer pools
- for i := range p.deferpool {
- // disconnect cached list before dropping it on the floor,
- // so that a dangling ref to one entry does not pin all of them.
- var d, dlink *_defer
- for d = p.deferpool[i]; d != nil; d = dlink {
- dlink = d.link
- d.link = nil
- }
- p.deferpool[i] = nil
- }
}
}
mp := acquirem()
if sc < uintptr(len(p{}.deferpool)) {
pp := mp.p
- d = pp.deferpool[sc]
- if d != nil {
- pp.deferpool[sc] = d.link
+ if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
+ lock(&sched.deferlock)
+ for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
+ d := sched.deferpool[sc]
+ sched.deferpool[sc] = d.link
+ d.link = nil
+ pp.deferpool[sc] = append(pp.deferpool[sc], d)
+ }
+ unlock(&sched.deferlock)
+ }
+ if ln := len(pp.deferpool[sc]); ln > 0 {
+ d = pp.deferpool[sc][ln-1]
+ pp.deferpool[sc][ln-1] = nil
+ pp.deferpool[sc] = pp.deferpool[sc][:ln-1]
}
}
if d == nil {
// Free the given defer.
// The defer cannot be used after this call.
-//go:nosplit
func freedefer(d *_defer) {
if d._panic != nil {
freedeferpanic()
if sc < uintptr(len(p{}.deferpool)) {
mp := acquirem()
pp := mp.p
+ if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
+ // Transfer half of local cache to the central cache.
+ var first, last *_defer
+ for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
+ ln := len(pp.deferpool[sc])
+ d := pp.deferpool[sc][ln-1]
+ pp.deferpool[sc][ln-1] = nil
+ pp.deferpool[sc] = pp.deferpool[sc][:ln-1]
+ if first == nil {
+ first = d
+ } else {
+ last.link = d
+ }
+ last = d
+ }
+ lock(&sched.deferlock)
+ last.link = sched.deferpool[sc]
+ sched.deferpool[sc] = first
+ unlock(&sched.deferlock)
+ }
*d = _defer{}
- d.link = pp.deferpool[sc]
- pp.deferpool[sc] = d
+ pp.deferpool[sc] = append(pp.deferpool[sc], d)
releasem(mp)
}
}
fn := d.fn
d.fn = nil
gp._defer = d.link
- freedefer(d)
+ // Switch to systemstack merely to save nosplit stack space.
+ systemstack(func() {
+ freedefer(d)
+ })
releasem(mp)
jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
pp.id = i
pp.status = _Pgcstop
pp.sudogcache = pp.sudogbuf[:0]
+ for i := range pp.deferpool {
+ pp.deferpool[i] = pp.deferpoolbuf[i][:0]
+ }
atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
}
if pp.mcache == nil {
p.sudogbuf[i] = nil
}
p.sudogcache = p.sudogbuf[:0]
+ for i := range p.deferpool {
+ for j := range p.deferpoolbuf[i] {
+ p.deferpoolbuf[i][j] = nil
+ }
+ p.deferpool[i] = p.deferpoolbuf[i][:0]
+ }
freemcache(p.mcache)
p.mcache = nil
gfpurge(p)
syscalltick uint32 // incremented on every system call
m *m // back-link to associated m (nil if idle)
mcache *mcache
- deferpool [5]*_defer // pool of available defer structs of different sizes (see panic.c)
+
+ deferpool [5][]*_defer // pool of available defer structs of different sizes (see panic.c)
+ deferpoolbuf [5][32]*_defer
// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
goidcache uint64
sudoglock mutex
sudogcache *sudog
+ // Central pool of available defer structs of different sizes.
+ deferlock mutex
+ deferpool [5]*_defer
+
gcwaiting uint32 // gc is waiting to run
stopwait int32
stopnote note