Cypherpunks repositories - gostls13.git/commitdiff
runtime: bound defer pools (try 2)
authorDmitry Vyukov <dvyukov@google.com>
Thu, 5 Feb 2015 13:35:41 +0000 (13:35 +0000)
committerDmitry Vyukov <dvyukov@google.com>
Wed, 4 Mar 2015 14:29:58 +0000 (14:29 +0000)
The unbounded list-based defer pool can grow infinitely.
This can happen if a goroutine routinely allocates a defer,
then blocks on one P, and is then unblocked, scheduled, and
frees the defer on another P.
The scenario was reported on golang-nuts list.

We've been here several times. Any unbounded local cache
is bad and can grow to an arbitrary size. This change introduces
a central defer pool; local pools become fixed-size,
with the only purpose of amortizing accesses to the
central pool.

Freedefer now executes on the system stack so that it does not
consume nosplit stack space.

Change-Id: I1a27695838409259d1586a0adfa9f92bccf7ceba
Reviewed-on: https://go-review.googlesource.com/3967
Reviewed-by: Keith Randall <khr@golang.org>
Run-TryBot: Dmitry Vyukov <dvyukov@google.com>

src/runtime/mgc.go
src/runtime/panic.go
src/runtime/proc1.go
src/runtime/runtime2.go

index 5417d3a291a84602382f00d1969a788839a7018d..af1615376e1eecfd662dc43d62e97111c54e22cb 100644 (file)
@@ -641,6 +641,21 @@ func clearpools() {
        sched.sudogcache = nil
        unlock(&sched.sudoglock)
 
+       // Clear central defer pools.
+       // Leave per-P pools alone, they have strictly bounded size.
+       lock(&sched.deferlock)
+       for i := range sched.deferpool {
+               // disconnect cached list before dropping it on the floor,
+               // so that a dangling ref to one entry does not pin all of them.
+               var d, dlink *_defer
+               for d = sched.deferpool[i]; d != nil; d = dlink {
+                       dlink = d.link
+                       d.link = nil
+               }
+               sched.deferpool[i] = nil
+       }
+       unlock(&sched.deferlock)
+
        for _, p := range &allp {
                if p == nil {
                        break
@@ -650,18 +665,6 @@ func clearpools() {
                        c.tiny = nil
                        c.tinyoffset = 0
                }
-
-               // clear defer pools
-               for i := range p.deferpool {
-                       // disconnect cached list before dropping it on the floor,
-                       // so that a dangling ref to one entry does not pin all of them.
-                       var d, dlink *_defer
-                       for d = p.deferpool[i]; d != nil; d = dlink {
-                               dlink = d.link
-                               d.link = nil
-                       }
-                       p.deferpool[i] = nil
-               }
        }
 }
 
index e2a5c629dada2301c9e3ecdf8688585e45eb0a5d..9507384b92d3c7369df61afe162a9c6d3464e48e 100644 (file)
@@ -166,9 +166,20 @@ func newdefer(siz int32) *_defer {
        mp := acquirem()
        if sc < uintptr(len(p{}.deferpool)) {
                pp := mp.p
-               d = pp.deferpool[sc]
-               if d != nil {
-                       pp.deferpool[sc] = d.link
+               if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
+                       lock(&sched.deferlock)
+                       for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
+                               d := sched.deferpool[sc]
+                               sched.deferpool[sc] = d.link
+                               d.link = nil
+                               pp.deferpool[sc] = append(pp.deferpool[sc], d)
+                       }
+                       unlock(&sched.deferlock)
+               }
+               if ln := len(pp.deferpool[sc]); ln > 0 {
+                       d = pp.deferpool[sc][ln-1]
+                       pp.deferpool[sc][ln-1] = nil
+                       pp.deferpool[sc] = pp.deferpool[sc][:ln-1]
                }
        }
        if d == nil {
@@ -196,7 +207,6 @@ func newdefer(siz int32) *_defer {
 
 // Free the given defer.
 // The defer cannot be used after this call.
-//go:nosplit
 func freedefer(d *_defer) {
        if d._panic != nil {
                freedeferpanic()
@@ -214,9 +224,28 @@ func freedefer(d *_defer) {
        if sc < uintptr(len(p{}.deferpool)) {
                mp := acquirem()
                pp := mp.p
+               if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
+                       // Transfer half of local cache to the central cache.
+                       var first, last *_defer
+                       for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
+                               ln := len(pp.deferpool[sc])
+                               d := pp.deferpool[sc][ln-1]
+                               pp.deferpool[sc][ln-1] = nil
+                               pp.deferpool[sc] = pp.deferpool[sc][:ln-1]
+                               if first == nil {
+                                       first = d
+                               } else {
+                                       last.link = d
+                               }
+                               last = d
+                       }
+                       lock(&sched.deferlock)
+                       last.link = sched.deferpool[sc]
+                       sched.deferpool[sc] = first
+                       unlock(&sched.deferlock)
+               }
                *d = _defer{}
-               d.link = pp.deferpool[sc]
-               pp.deferpool[sc] = d
+               pp.deferpool[sc] = append(pp.deferpool[sc], d)
                releasem(mp)
        }
 }
@@ -267,7 +296,10 @@ func deferreturn(arg0 uintptr) {
        fn := d.fn
        d.fn = nil
        gp._defer = d.link
-       freedefer(d)
+       // Switch to systemstack merely to save nosplit stack space.
+       systemstack(func() {
+               freedefer(d)
+       })
        releasem(mp)
        jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
 }
index 906528c0abd45e92d8ae8012b603bca2ef0384df..7ecf60ee5eaa3a68ba5d1bf45b09b2e8b0e5e6b8 100644 (file)
@@ -2484,6 +2484,9 @@ func procresize(nprocs int32) *p {
                        pp.id = i
                        pp.status = _Pgcstop
                        pp.sudogcache = pp.sudogbuf[:0]
+                       for i := range pp.deferpool {
+                               pp.deferpool[i] = pp.deferpoolbuf[i][:0]
+                       }
                        atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
                }
                if pp.mcache == nil {
@@ -2526,6 +2529,12 @@ func procresize(nprocs int32) *p {
                        p.sudogbuf[i] = nil
                }
                p.sudogcache = p.sudogbuf[:0]
+               for i := range p.deferpool {
+                       for j := range p.deferpoolbuf[i] {
+                               p.deferpoolbuf[i][j] = nil
+                       }
+                       p.deferpool[i] = p.deferpoolbuf[i][:0]
+               }
                freemcache(p.mcache)
                p.mcache = nil
                gfpurge(p)
index 81d39fb48e349c8807c6254bb3175a051237873d..94fbb5253a5b5e3f5c3e2729f575df08ef92702f 100644 (file)
@@ -314,7 +314,9 @@ type p struct {
        syscalltick uint32 // incremented on every system call
        m           *m     // back-link to associated m (nil if idle)
        mcache      *mcache
-       deferpool   [5]*_defer // pool of available defer structs of different sizes (see panic.c)
+
+       deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.c)
+       deferpoolbuf [5][32]*_defer
 
        // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
        goidcache    uint64
@@ -372,6 +374,10 @@ type schedt struct {
        sudoglock  mutex
        sudogcache *sudog
 
+       // Central pool of available defer structs of different sizes.
+       deferlock mutex
+       deferpool [5]*_defer
+
        gcwaiting  uint32 // gc is waiting to run
        stopwait   int32
        stopnote   note