size = s.elemsize
}
+ var scanSize uintptr
if noscan {
// All objects are pre-marked as noscan. Nothing to do.
} else {
// pointers, GC has to scan to the last
// element.
if typ.ptrdata != 0 {
- c.local_scan += dataSize - typ.size + typ.ptrdata
+ scanSize = dataSize - typ.size + typ.ptrdata
}
} else {
- c.local_scan += typ.ptrdata
+ scanSize = typ.ptrdata
}
+ c.local_scan += scanSize
// Ensure that the stores above that initialize x to
// type-safe memory and set the heap bits occur before
// a race marking the bit.
if gcphase == _GCmarktermination || gcBlackenPromptly {
systemstack(func() {
- gcmarknewobject_m(uintptr(x), size)
+ gcmarknewobject_m(uintptr(x), size, scanSize)
})
}
// scanWork is the total scan work performed this cycle. This
// is updated atomically during the cycle. Updates occur in
// bounded batches, since it is both written and read
- // throughout the cycle.
+ // throughout the cycle. At the end of the cycle, this is how
+ // much of the retained heap is scannable.
//
// Currently this is the bytes of heap scanned. For most uses,
// this is an opaque unit of work, but for estimation the
work.markrootDone = true
for i := 0; i < int(gomaxprocs); i++ {
- if !allp[i].gcw.empty() {
+ gcw := &allp[i].gcw
+ if !gcw.empty() {
throw("P has cached GC work at end of mark termination")
}
+ if gcw.scanWork != 0 || gcw.bytesMarked != 0 {
+ throw("P has unflushed stats at end of mark termination")
+ }
}
if trace.enabled {
// If gcBlackenPromptly is true we are in the second mark phase so we allocate black.
// gcmarknewobject_m marks a freshly allocated object as already black:
// it sets the object's mark bit and records the allocation in the
// global bytesMarked total. With this change it additionally credits
// scanSize — the object's scannable bytes — as scan work on the
// current P's gcWork, so end-of-cycle scan accounting includes objects
// that were allocated black and never actually scanned.
//
// obj is the object's address, size its total allocated bytes,
// scanSize the portion the GC would otherwise have had to scan.
// NOTE(review): callers appear to invoke this via systemstack during
// mark termination / prompt blackening — confirm against mallocgc.
//go:nowritebarrier
-func gcmarknewobject_m(obj, size uintptr) {
+func gcmarknewobject_m(obj, size, scanSize uintptr) {
if useCheckmark && !gcBlackenPromptly { // The world should be stopped so this should not happen.
throw("gcmarknewobject called while doing checkmark")
}
heapBitsForAddr(obj).setMarked()
atomic.Xadd64(&work.bytesMarked, int64(size))
+ // Accumulate the scan-work credit on the per-P gcWork rather than
+ // updating a global atomically; it is flushed with the P's other
+ // GC stats later in the cycle.
+ gcw := &getg().m.p.ptr().gcw
+ gcw.scanWork += int64(scanSize)
}
// Checkmarking