empty lfstack // lock-free list of empty workbufs
pad0 [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
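+ // wbufSpans holds the spans that back workbufs. Workbuf memory
+ // is manually managed, so these spans must be tracked explicitly.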
+ wbufSpans struct {
+ lock mutex
+ // busy is a list of all spans containing workbufs on
+ // one of the workbuf lists.
+ busy mSpanList
+ }
+
+ // Restore 64-bit alignment on 32-bit.
+ _ uint32
+
// bytesMarked is the number of bytes marked this cycle. This
// includes bytes blackened in scanned objects, noscan objects
// that go straight to black, and permagrey objects scanned by
const (
_WorkbufSize = 2048 // in bytes; larger values result in less contention
+
+ // workbufAlloc is the number of bytes to allocate at a time
+ // for new workbufs. This must be a multiple of pageSize and
+ // should be a multiple of _WorkbufSize.
+ //
+ // Larger values reduce workbuf allocation overhead. Smaller
+ // values reduce heap fragmentation.
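+ //
+ // At the runtime's 8 KiB page size, 32 KiB is 4 pages, or 16
+ // workbufs.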
+ workbufAlloc = 32 << 10
)
+func init() {
+ if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
+ throw("bad workbufAlloc")
+ }
+}
+
// Garbage collector work pool abstraction.
//
// This implements a producer/consumer model for pointers to grey
}
}
if b == nil {
- b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), sys.CacheLineSize, &memstats.gc_sys))
+ // Allocate more workbufs.
+ var s *mspan
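+ // allocManual must be called on the system stack, hence the
+ // systemstack switch here.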
+ systemstack(func() {
+ s = mheap_.allocManual(workbufAlloc/pageSize, &memstats.gc_sys)
+ })
+ if s == nil {
+ throw("out of memory")
+ }
+ // Record the new span in the busy list.
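+ // Tracking the span lets the runtime find this memory again,
+ // e.g. to release it once the workbufs are no longer needed.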
+ lock(&work.wbufSpans.lock)
+ work.wbufSpans.busy.insert(s)
+ unlock(&work.wbufSpans.lock)
+ // Slice up the span into new workbufs. Return one and
+ // put the rest on the empty list.
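+ // workbufAlloc is a multiple of _WorkbufSize (checked in init),
+ // so the span divides evenly into workbufs.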
+ for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
+ newb := (*workbuf)(unsafe.Pointer(s.base() + i))
+ newb.nobj = 0
+ if i == 0 {
+ b = newb
+ } else {
+ putempty(newb)
+ }
+ }
}
return b
}
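
As a standalone illustration of the same pattern (carve one large allocation into fixed-size buffers, return the first, pool the rest), here is a minimal sketch using an ordinary heap slice in place of a manually-managed span. The names buf, bufSize, bufAlloc, and the slice-based empty pool are hypothetical stand-ins, not runtime APIs:

package main

import (
	"fmt"
	"unsafe"
)

const (
	bufSize  = 2048     // analogous to _WorkbufSize
	bufAlloc = 32 << 10 // analogous to workbufAlloc; a multiple of bufSize
)

// buf stands in for workbuf: a fixed-size block with a small header.
type buf struct {
	nobj uintptr
}

// empty stands in for work.empty (a plain slice instead of an lfstack).
var empty []*buf

// getempty mirrors the allocation path above: when the pool is dry,
// grab one large block, carve it into bufAlloc/bufSize bufs, return
// the first, and pool the rest.
func getempty() *buf {
	if n := len(empty); n > 0 {
		b := empty[n-1]
		empty = empty[:n-1]
		return b
	}
	// Ordinary GC'd memory stands in for mheap_.allocManual; the
	// interior pointers below keep the whole block alive.
	block := make([]byte, bufAlloc)
	var first *buf
	for i := 0; i+bufSize <= bufAlloc; i += bufSize {
		b := (*buf)(unsafe.Pointer(&block[i]))
		b.nobj = 0
		if i == 0 {
			first = b
		} else {
			empty = append(empty, b)
		}
	}
	return first
}

func main() {
	b := getempty()
	fmt.Println(b.nobj, len(empty)) // 0 15 (one returned, 15 pooled)
}

Because block is ordinary garbage-collected memory, the sketch needs no analogue of wbufSpans; the runtime tracks a busy list precisely because manually-managed spans are invisible to the collector.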