sg := mheap_.sweepgen
spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
// Note that work.spans may not include spans that were
- // allocated between entering the scan phase and now. This is
- // okay because any objects with finalizers in those spans
- // must have been allocated and given finalizers after we
- // entered the scan phase, so addfinalizer will have ensured
- // the above invariants for them.
- for _, s := range spans {
+ // allocated between entering the scan phase and now. We may
+ // also race with spans being added into sweepSpans when they're
+ // just created, and as a result we may see nil pointers in the
+ // spans slice. This is okay because any objects with finalizers
+ // in those spans must have been allocated and given finalizers
+ // after we entered the scan phase, so addfinalizer will have
+ // ensured the above invariants for them.
+ for i := 0; i < len(spans); i++ {
+ // sweepBuf.block requires that we read pointers from the block atomically.
+ // It also requires that we ignore nil pointers.
+ s := (*mspan)(atomic.Loadp(unsafe.Pointer(&spans[i])))
+
// This is racing with spans being initialized, so
// check the state carefully.
- if s.state.get() != mSpanInUse {
+ if s == nil || s.state.get() != mSpanInUse {
continue
}
// Check that this span was swept (it may be cached or uncached).
unlock(&b.spineLock)
}
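
An aside on the reader side above: the loop switches from ranging over the slice to indexing it and loading each entry with atomic.Loadp, because a plain read of an entry can race with push storing into the same slot. Below is a minimal stand-alone sketch of that pattern using only the public sync/atomic package; the span type, the state encoding, and usableSpans are invented for illustration (the runtime uses runtime/internal/atomic.Loadp and mspan.state.get()).

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// span stands in for mspan; state 1 plays the role of mSpanInUse.
type span struct{ state uint32 }

// usableSpans mirrors the loop above: load every slot with an atomic
// pointer load that matches the writer's atomic store, and skip slots
// the writer has reserved but not filled yet (nil).
func usableSpans(spans []*span) []*span {
	var out []*span
	for i := 0; i < len(spans); i++ {
		p := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&spans[i])))
		s := (*span)(p)
		if s == nil || atomic.LoadUint32(&s.state) != 1 {
			continue
		}
		out = append(out, s)
	}
	return out
}

func main() {
	spans := []*span{{state: 1}, nil, {state: 0}}
	fmt.Println(len(usableSpans(spans))) // 1: nil and not-in-use entries are skipped
}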
- // We have a block. Insert the span.
- block.spans[bottom] = s
+ // We have a block. Insert the span atomically, since there may be
+ // concurrent readers via the block API.
+ atomic.StorepNoWB(unsafe.Pointer(&block.spans[bottom]), unsafe.Pointer(s))
}
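
The store in push becomes atomic because a reader walking the same block through the block API may load block.spans[bottom] at any moment; StorepNoWB is the runtime-internal atomic pointer store that also skips the write barrier, presumably fine here because neither the block memory nor the mspans it points at live in the garbage-collected heap. A rough stand-alone sketch of the same single-writer publication, using sync/atomic's generic Pointer (blockT, publish, and the slot count are invented for illustration):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type span struct{ id int }

// blockT models one fixed-size block: a single writer fills slots in
// order, and readers may scan the block at any time, so every slot is
// published with an atomic store and read with an atomic load.
type blockT struct {
	slots [4]atomic.Pointer[span]
}

// publish plays the role of the store into block.spans[bottom]: the
// atomic store pairs with the readers' atomic loads, so publication is
// race-free under the Go memory model.
func (b *blockT) publish(bottom int, s *span) {
	b.slots[bottom].Store(s)
}

func main() {
	var b blockT
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < len(b.slots); i++ {
			b.publish(i, &span{id: i})
		}
	}()
	// A concurrent reader counts what has been published so far,
	// skipping slots that are still nil.
	n := 0
	for i := range b.slots {
		if b.slots[i].Load() != nil {
			n++
		}
	}
	fmt.Println(n, "of", len(b.slots), "slots published so far")
	wg.Wait()
}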
// pop removes and returns a span from buffer b, or nil if b is empty.
}
// block returns the spans in the i'th block of buffer b. block is
-// safe to call concurrently with push.
+// safe to call concurrently with push. The block may contain nil
+// pointers that must be ignored, and each entry in the block must be
+// loaded atomically.
func (b *gcSweepBuf) block(i int) []*mspan {
// Perform bounds check before loading spine address since
// push ensures the allocated length is at least spineLen.
} else {
spans = block.spans[:bottom]
}
-
- // push may have reserved a slot but not filled it yet, so
- // trim away unused entries.
- for len(spans) > 0 && spans[len(spans)-1] == nil {
- spans = spans[:len(spans)-1]
- }
return spans
}
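
On the removed trimming loop: it read trailing block entries with plain loads while push could be storing into those same slots concurrently, and under the Go memory model a plain read concurrent with a write to the same location is a data race even when the write itself is atomic. Moving the nil check into callers, which per the new doc comment must load entries atomically, removes that race. A small illustration with invented names; running it with go run -race should flag the plain read but not the atomic one:

package main

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

type span struct{ id int }

func main() {
	slots := make([]*span, 4)
	last := (*unsafe.Pointer)(unsafe.Pointer(&slots[len(slots)-1]))

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Writer: publish into the last slot with an atomic store,
		// as push now does.
		atomic.StorePointer(last, unsafe.Pointer(&span{id: 42}))
	}()

	// Racy reader: a plain load of the same slot, like the removed
	// trimming loop's spans[len(spans)-1] == nil check. This races
	// with the atomic store above.
	_ = slots[len(slots)-1] == nil

	// Race-free reader: an atomic load matching the atomic store,
	// which is what callers of block are now required to do.
	_ = atomic.LoadPointer(last) == nil

	wg.Wait()
}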