// This may be racing with GC so do it atomically if there can be
// a race marking the bit.
if gcphase != _GCoff {
- // Pass the full size of the allocation to the number of bytes
- // marked.
- //
- // If !goexperiment.AllocHeaders, "size" doesn't include the
- // allocation header, so use span.elemsize unconditionally.
- gcmarknewobject(span, uintptr(x), span.elemsize)
+ gcmarknewobject(span, uintptr(x))
}
if raceenabled {
asanunpoison(x, userSize)
}
+ // If !goexperiment.AllocHeaders, "size" doesn't include the
+ // allocation header, so use span.elemsize as the "full" size
+ // for various computations below.
+ //
+ // TODO(mknyszek): We should really count the header as part
+ // of gc_sys or something, but it's risky to change the
+ // accounting so much right now. Just pretend it's internal
+ // fragmentation and match the GC's accounting by using the
+ // whole allocation slot.
+ fullSize := size
+ if goexperiment.AllocHeaders {
+ fullSize = span.elemsize
+ }
if rate := MemProfileRate; rate > 0 {
// Note cache c only valid while m acquired; see #47302
- if rate != 1 && size < c.nextSample {
- c.nextSample -= size
+ //
+ // N.B. Use the full size because that matches how the GC
+ // will update the mem profile on the "free" side.
+ if rate != 1 && fullSize < c.nextSample {
+ c.nextSample -= fullSize
} else {
- profilealloc(mp, x, size)
+ profilealloc(mp, x, fullSize)
}
}
mp.mallocing = 0
if goexperiment.AllocHeaders && header != nil {
throw("unexpected malloc header in delayed zeroing of large object")
}
+ // N.B. size == fullSize always in this case.
memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
}
if inittrace.active && inittrace.id == getg().goid {
// Init functions are executed sequentially in a single goroutine.
- inittrace.bytes += uint64(size)
+ inittrace.bytes += uint64(fullSize)
}
}
if assistG != nil {
// Account for internal fragmentation in the assist
// debt now that we know it.
- assistG.gcAssistBytes -= int64(size - dataSize)
+ //
+ // N.B. Use the full size because that's how the rest
+ // of the GC accounts for bytes marked.
+ assistG.gcAssistBytes -= int64(fullSize - dataSize)
}
if shouldhelpgc {
//
//go:nowritebarrier
//go:nosplit
-func gcmarknewobject(span *mspan, obj, size uintptr) {
+func gcmarknewobject(span *mspan, obj uintptr) {
if useCheckmark { // The world should be stopped so this should not happen.
throw("gcmarknewobject called while doing checkmark")
}
}
gcw := &getg().m.p.ptr().gcw
- gcw.bytesMarked += uint64(size)
+ gcw.bytesMarked += uint64(span.elemsize)
}
// gcMarkTinyAllocs greys all active tiny alloc blocks.