runtime: allocate GC workbufs from manually-managed spans
author Austin Clements <austin@google.com>
Mon, 20 Mar 2017 18:05:48 +0000 (14:05 -0400)
committer Austin Clements <austin@google.com>
Thu, 13 Apr 2017 18:20:44 +0000 (18:20 +0000)
Currently the runtime allocates workbufs from persistent memory, which
means they can never be freed.

Switch to allocating them from manually-managed heap spans. This
doesn't free them yet, but it puts us in a position to do so.
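Conceptually, the change replaces a grow-only allocator with page-multiple chunks that are tracked on a list, so whole chunks can eventually be handed back. A standalone sketch of that shape (not runtime code; allocator, chunk, and the sizes are illustrative, with a plain make standing in for mheap_.allocManual):

package main

// chunk is a stand-in for a manually-managed span: a page-multiple block
// of memory that workbuf-sized pieces are carved from.
type chunk struct {
	mem  []byte
	next *chunk
}

// allocator keeps every chunk it hands out on a busy list so that, unlike
// a persistent/grow-only allocator, it can later free them all.
type allocator struct {
	busy *chunk
}

func (a *allocator) alloc(size int) []byte {
	c := &chunk{mem: make([]byte, size)} // stand-in for mheap_.allocManual
	c.next = a.busy
	a.busy = c
	return c.mem
}

func (a *allocator) freeAll() {
	for c := a.busy; c != nil; c = c.next {
		c.mem = nil // stand-in for returning the span to the heap
	}
	a.busy = nil
}

func main() {
	var a allocator
	_ = a.alloc(32 << 10) // one workbufAlloc-sized chunk
	a.freeAll()
}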

For #19325.

Change-Id: I94b2512a2f2bbbb456cd9347761b9412e80d2da9
Reviewed-on: https://go-review.googlesource.com/38581
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
src/runtime/mgc.go
src/runtime/mgcwork.go

index b79617edf7a27479f0c33ff309b3c9d7a5bec3a1..d537aaf67efc2f1eb1c4dbff6f7c6fab45f91548 100644 (file)
@@ -795,6 +795,16 @@ var work struct {
        empty lfstack                  // lock-free list of empty blocks workbuf
        pad0  [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
 
+       wbufSpans struct {
+               lock mutex
+               // busy is a list of all spans containing workbufs on
+               // one of the workbuf lists.
+               busy mSpanList
+       }
+
+       // Restore 64-bit alignment on 32-bit.
+       _ uint32
+
        // bytesMarked is the number of bytes marked this cycle. This
        // includes bytes blackened in scanned objects, noscan objects
        // that go straight to black, and permagrey objects scanned by
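The blank uint32 keeps the 64-bit fields that follow (bytesMarked and the other per-cycle counters, which are updated atomically) 8-byte aligned on 32-bit platforms, where the new wbufSpans fields would otherwise leave the offset at 4 mod 8. The same idiom appears in ordinary Go code; a small standalone illustration (counters and its fields are made-up names):

package main

import (
	"fmt"
	"sync/atomic"
)

// Illustrative only: 64-bit sync/atomic operations need 8-byte-aligned
// fields, but 32-bit platforms only guarantee that alignment for the first
// word of an allocation. After an odd number of 4-byte words, a blank
// uint32 restores the alignment of the uint64 that follows.
type counters struct {
	flags uint32
	_     uint32 // restore 64-bit alignment on 32-bit
	hits  uint64
}

func main() {
	c := new(counters) // heap-allocated, so its first word is 64-bit aligned even on 32-bit
	atomic.AddUint64(&c.hits, 1)
	fmt.Println(c.hits) // 1
}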
index 1df40d2afe1c5f85eadc08f656de89333ee312cd..a9559230dec6ae78fcc8ddb064be03104383db38 100644 (file)
@@ -12,8 +12,22 @@ import (
 
 const (
        _WorkbufSize = 2048 // in bytes; larger values result in less contention
+
+       // workbufAlloc is the number of bytes to allocate at a time
+       // for new workbufs. This must be a multiple of pageSize and
+       // should be a multiple of _WorkbufSize.
+       //
+       // Larger values reduce workbuf allocation overhead. Smaller
+       // values reduce heap fragmentation.
+       workbufAlloc = 32 << 10
 )
 
+func init() {
+       if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
+               throw("bad workbufAlloc")
+       }
+}
+
 // Garbage collector work pool abstraction.
 //
 // This implements a producer/consumer model for pointers to grey
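With the runtime's 8 KiB pages, 32<<10 is four pages and holds exactly sixteen 2048-byte workbufs, so both divisibility conditions hold. A standalone analogue of the init-time check (panic standing in for the runtime's throw; the constants are copied here only for illustration):

package main

const (
	pageSize     = 8 << 10 // the runtime's page size, repeated here for illustration
	workbufSize  = 2048    // _WorkbufSize
	workbufAlloc = 32 << 10
)

func init() {
	// A chunk must be whole pages (it is allocated by the page) and should
	// split evenly into workbufs so no tail bytes are wasted.
	if workbufAlloc%pageSize != 0 || workbufAlloc%workbufSize != 0 {
		panic("bad workbufAlloc")
	}
}

func main() {
	println(workbufAlloc/pageSize, "pages,", workbufAlloc/workbufSize, "workbufs per chunk") // 4 pages, 16 workbufs
}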
@@ -318,7 +332,29 @@ func getempty() *workbuf {
                }
        }
        if b == nil {
-               b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), sys.CacheLineSize, &memstats.gc_sys))
+               // Allocate more workbufs.
+               var s *mspan
+               systemstack(func() {
+                       s = mheap_.allocManual(workbufAlloc/pageSize, &memstats.gc_sys)
+               })
+               if s == nil {
+                       throw("out of memory")
+               }
+               // Record the new span in the busy list.
+               lock(&work.wbufSpans.lock)
+               work.wbufSpans.busy.insert(s)
+               unlock(&work.wbufSpans.lock)
+               // Slice up the span into new workbufs. Return one and
+               // put the rest on the empty list.
+               for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
+                       newb := (*workbuf)(unsafe.Pointer(s.base() + i))
+                       newb.nobj = 0
+                       if i == 0 {
+                               b = newb
+                       } else {
+                               putempty(newb)
+                       }
+               }
        }
        return b
 }
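The new path in getempty grabs one workbufAlloc-sized span, records it on the busy list, and slices it into workbufs, returning the first and pushing the rest onto the empty list. The carving loop on its own, sketched with byte slices instead of a span (not runtime code):

package main

import "fmt"

const (
	workbufAlloc = 32 << 10 // bytes per chunk
	workbufSize  = 2048     // bytes per workbuf (_WorkbufSize)
)

// carve mirrors the shape of the loop in getempty: return the first
// workbuf-sized piece and push the rest onto the empty list.
func carve(chunk []byte, empty *[][]byte) []byte {
	var first []byte
	for i := 0; i+workbufSize <= len(chunk); i += workbufSize {
		b := chunk[i : i+workbufSize]
		if i == 0 {
			first = b
		} else {
			*empty = append(*empty, b)
		}
	}
	return first
}

func main() {
	var empty [][]byte
	chunk := make([]byte, workbufAlloc) // stand-in for the manually-managed span
	b := carve(chunk, &empty)
	fmt.Println(len(b), len(empty)) // 2048 15
}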