h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
s.isUserArenaChunk = true
s.elemsize -= userArenaChunkReserveBytes()
- s.limit = s.base() + s.elemsize
s.freeindex = 1
s.allocCount = 1
+ // Adjust s.limit down to the object-containing part of the span.
+ //
+ // This is just to create a slightly tighter bound on the limit.
+ // It's totally OK if the garbage collector, in particular
+ // conservative scanning, can temporarily observe an inflated
+ // limit. It will simply mark the whole chunk or just skip it
+ // since we're in the mark phase anyway.
+ s.limit = s.base() + s.elemsize
+
// Adjust size to include redzone.
if asanenabled {
s.elemsize -= redZoneSize(s.elemsize)
// Put the large span in the mcentral swept list so that it's
// visible to the background sweeper.
mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
+
+ // Adjust s.limit down to the object-containing part of the span.
+ //
+ // This is just to create a slightly tighter bound on the limit.
+ // It's totally OK if the garbage collector, in particular
+ // conservative scanning, can temporarily observe an inflated
+ // limit. It will simply mark the whole object or just skip it
+ // since we're in the mark phase anyway.
s.limit = s.base() + size
s.initHeapBits()
return s
// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
npages := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()])
- size := uintptr(gc.SizeClassToSize[c.spanclass.sizeclass()])
-
s := mheap_.alloc(npages, c.spanclass)
if s == nil {
return nil
}
- s.limit = s.base() + size*uintptr(s.nelems)
+ // NOTE(review): s.limit no longer needs to be set here. The span
+ // initialization path now computes the same value
+ // (s.base() + s.elemsize*s.nelems) for heap spans itself, and a
+ // conservative upper bound (base + npages*gc.PageSize) is installed
+ // even earlier when the mspan is created — see go.dev/issue/74288.
s.initHeapBits()
return s
}
if typ.manual() {
s.manualFreeList = 0
s.nelems = 0
- s.limit = s.base() + s.npages*pageSize
s.state.set(mSpanManual)
} else {
// We must set span properties before the span is published anywhere
s.gcmarkBits = newMarkBits(uintptr(s.nelems))
s.allocBits = newAllocBits(uintptr(s.nelems))
+ // Adjust s.limit down to the object-containing part of the span.
+ s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems)
+
// It's safe to access h.sweepgen without the heap lock because it's
// only ever updated with the world stopped and we run on the
// systemstack which blocks a STW transition.
span.list = nil
span.startAddr = base
span.npages = npages
+ span.limit = base + npages*gc.PageSize // see go.dev/issue/74288; adjusted later for heap spans
span.allocCount = 0
span.spanclass = 0
span.elemsize = 0