wbufSpans struct {
lock mutex
+ // free is a list of spans dedicated to workbufs, but
+ // that don't currently contain any workbufs.
+ free mSpanList
// busy is a list of all spans containing workbufs on
// one of the workbuf lists.
busy mSpanList
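// A workbuf span cycles between these lists: getempty pulls a span from
// free (or allocates one with mheap_.allocManual) and records it on busy;
// prepareFreeWorkbufs moves busy spans to free once every workbuf is back
// on the empty list; and freeSomeWbufs returns free spans to the heap with
// mheap_.freeManual, a batch at a time.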
// Flush the heap profile so we can start a new cycle next GC.
// This is relatively expensive, so we don't do it with the
// world stopped.
mProf_Flush()
+ // Prepare workbufs for freeing by the sweeper. We do this
+ // asynchronously because it can take non-trivial time.
+ prepareFreeWorkbufs()
+
// Free stack spans. This must be done between GC cycles.
systemstack(freeStackSpans)
for sweepone() != ^uintptr(0) {
sweep.npausesweep++
}
+ // Free workbufs eagerly.
+ prepareFreeWorkbufs()
+ for freeSomeWbufs(false) {
+ }
// All "free" events for this mark/sweep cycle have
// now happened, so we can make this profile cycle
// available immediately.
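// The synchronous path above frees everything eagerly; when sweeping runs
// concurrently, the preemptible variant is meant to drive the same loop
// instead. A minimal sketch of that call pattern, assuming the background
// sweeper (bgsweep in mgcsweep.go) as the call site, which is not shown in
// this excerpt:
for freeSomeWbufs(true) {
// Each call frees at most one batch of spans and reports whether more
// remain; yielding between calls keeps the sweeper preemptible.
Gosched()
}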
if b == nil {
// Allocate more workbufs.
var s *mspan
- systemstack(func() {
- s = mheap_.allocManual(workbufAlloc/pageSize, &memstats.gc_sys)
- })
+ if work.wbufSpans.free.first != nil {
+ lock(&work.wbufSpans.lock)
+ s = work.wbufSpans.free.first
+ if s != nil {
+ work.wbufSpans.free.remove(s)
+ work.wbufSpans.busy.insert(s)
+ }
+ unlock(&work.wbufSpans.lock)
+ }
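// The unlocked read of free.first above is presumably just a fast-path
// check to avoid taking wbufSpans.lock when no free spans are available;
// the pointer is re-read and re-checked under the lock before it is used.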
if s == nil {
- throw("out of memory")
+ systemstack(func() {
+ s = mheap_.allocManual(workbufAlloc/pageSize, &memstats.gc_sys)
+ })
+ if s == nil {
+ throw("out of memory")
+ }
+ // Record the new span in the busy list.
+ lock(&work.wbufSpans.lock)
+ work.wbufSpans.busy.insert(s)
+ unlock(&work.wbufSpans.lock)
}
- // Record the new span in the busy list.
- lock(&work.wbufSpans.lock)
- work.wbufSpans.busy.insert(s)
- unlock(&work.wbufSpans.lock)
// Slice up the span into new workbufs. Return one and
// put the rest on the empty list.
for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
newb := (*workbuf)(unsafe.Pointer(s.base() + i))
newb.nobj = 0
if i == 0 {
b = newb
} else {
putempty(newb)
}
}
}
return b
}
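// For scale, assuming the constants defined earlier in mgcwork.go
// (_WorkbufSize = 2048 bytes, workbufAlloc = 32<<10 bytes) and the usual
// 8 KB runtime page: each span here is workbufAlloc/pageSize = 4 pages and
// is sliced into workbufAlloc/_WorkbufSize = 16 workbufs.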
+
+// prepareFreeWorkbufs moves busy workbuf spans to free list so they
+// can be freed to the heap. This must only be called when all
+// workbufs are on the empty list.
+func prepareFreeWorkbufs() {
+ lock(&work.wbufSpans.lock)
+ if work.full != 0 {
+ throw("cannot free workbufs when work.full != 0")
+ }
+ // Since all workbufs are on the empty list, we don't care
+ // which ones are in which spans. We can wipe the entire empty
+ // list and move all workbuf spans to the free list.
+ work.empty = 0
+ work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
+ unlock(&work.wbufSpans.lock)
+}
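// Wiping work.empty merely drops the references to the workbufs; the
// buffers themselves live inside the spans, so no memory is lost, and the
// spans, now all on the free list, are returned to the heap by
// freeSomeWbufs.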
+
+// freeSomeWbufs frees some workbufs back to the heap and returns
+// true if it should be called again to free more.
+func freeSomeWbufs(preemptible bool) bool {
+ const batchSize = 64 // ~1–2 µs per span.
+ lock(&work.wbufSpans.lock)
+ if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
+ unlock(&work.wbufSpans.lock)
+ return false
+ }
+ systemstack(func() {
+ gp := getg().m.curg
+ for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ {
+ span := work.wbufSpans.free.first
+ if span == nil {
+ break
+ }
+ work.wbufSpans.free.remove(span)
+ mheap_.freeManual(span, &memstats.gc_sys)
+ }
+ })
+ more := !work.wbufSpans.free.isEmpty()
+ unlock(&work.wbufSpans.lock)
+ return more
+}
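// An upper bound on one non-preemptible batch: 64 spans at ~1–2 µs each is
// roughly 64–128 µs with wbufSpans.lock held, which is why callers outside
// a stop-the-world window (such as the concurrent sweeper) are expected to
// pass preemptible=true and yield between calls.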
span.list = list
}
+// takeAll removes all spans from other and inserts them at the front
+// of list.
+func (list *mSpanList) takeAll(other *mSpanList) {
+ if other.isEmpty() {
+ return
+ }
+
+ // Reparent everything in other to list.
+ for s := other.first; s != nil; s = s.next {
+ s.list = list
+ }
+
+ // Concatenate the lists.
+ if list.isEmpty() {
+ *list = *other
+ } else {
+ // Neither list is empty. Put other before list.
+ other.last.next = list.first
+ list.first.prev = other.last
+ list.first = other.first
+ }
+
+ other.first, other.last = nil, nil
+}
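// For example, with hypothetical lists list = [a, b] and other = [c, d],
// list.takeAll(other) leaves list = [c, d, a, b] and other empty, with the
// list field of all four spans pointing at list.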
+
const (
_KindSpecialFinalizer = 1
_KindSpecialProfile = 2