// totaltime is the CPU nanoseconds spent in GC since the
// program started if debug.gctrace > 0.
totaltime int64
+
+ // bytesMarked is the number of bytes marked this cycle. This
+ // includes bytes blackened in scanned objects, noscan objects
+ // that go straight to black, and permagrey objects scanned by
+ // markroot during the concurrent scan phase. This is updated
+ // atomically during the cycle. Updates may be batched
+ // arbitrarily, since the value is only read at the end of the
+ // cycle.
+ //
+ // Because of benign races during marking, this number may not
+ // be the exact number of marked bytes, but it should be very
+ // close.
+ bytesMarked uint64
}
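// Sketch (not runtime code) of the batching the bytesMarked comment above
// describes: each worker accumulates into a private counter and publishes it
// with a single atomic add, so the global total lags while marking is in
// progress but is complete once every worker has flushed. Names here are
// illustrative, not the runtime's.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var bytesMarked uint64 // plays the role of work.bytesMarked

func markWorker(objSizes []uint64, wg *sync.WaitGroup) {
	defer wg.Done()
	var local uint64 // plays the role of gcWork.bytesMarked
	for _, n := range objSizes {
		local += n // no synchronization on the per-object path
	}
	// One atomic per flush, analogous to dispose.
	atomic.AddUint64(&bytesMarked, local)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go markWorker([]uint64{16, 32, 64}, &wg)
	}
	wg.Wait()
	fmt.Println(atomic.LoadUint64(&bytesMarked)) // 448
}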
// GC runs a garbage collection.
// reclaimed until the next GC cycle.
clearpools()
+ work.bytesMarked = 0
+
if mode == gcBackgroundMode { // Do as much work concurrently as possible
systemstack(func() {
gcphase = _GCscan
restartg(gp)
}
}
+
+ // Roots aren't part of the heap, so don't count them toward
+ // marked heap bytes.
+ gcw.bytesMarked = 0
gcw.dispose()
}
}
gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
tracebackdefers(gp, scanframe, nil)
+ // Stacks aren't part of the heap, so don't count them toward
+ // marked heap bytes.
+ gcw.bytesMarked = 0
gcw.disposeToCache()
gp.gcscanvalid = true
}
}
// Mark the object.
- if obj, hbits, _ := heapBitsForObject(obj); obj != 0 {
- greyobject(obj, b, i, hbits, gcw)
+ if obj, hbits, span := heapBitsForObject(obj); obj != 0 {
+ greyobject(obj, b, i, hbits, span, gcw)
}
}
+ gcw.bytesMarked += uint64(n)
}
// Shade the object if it isn't already.
if !inheap(b) {
throw("shade: passed an address not in the heap")
}
- if obj, hbits, _ := heapBitsForObject(b); obj != 0 {
+ if obj, hbits, span := heapBitsForObject(b); obj != 0 {
// TODO: this would be a great place to put a check to see
// if we are harvesting and if we are then we should
// figure out why there is a call to shade when the
// }
var gcw gcWork
- greyobject(obj, 0, 0, hbits, &gcw)
+ greyobject(obj, 0, 0, hbits, span, &gcw)
// This is part of the write barrier so put the wbuf back.
if gcphase == _GCmarktermination {
gcw.dispose()
// Return possibly new workbuf to use.
// base and off are for debugging only and could be removed.
//go:nowritebarrier
-func greyobject(obj, base, off uintptr, hbits heapBits, gcw *gcWork) {
+func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork) {
// obj should be start of allocation, and so must be at least pointer-aligned.
if obj&(ptrSize-1) != 0 {
throw("greyobject: obj not pointer-aligned")
// If this is a noscan object, fast-track it to black
// instead of greying it.
if hbits.typeBits() == typeDead {
+ gcw.bytesMarked += uint64(span.elemsize)
return
}
}
// When in the _GCmarktermination phase we allocate black.
//go:nowritebarrier
-func gcmarknewobject_m(obj uintptr) {
+func gcmarknewobject_m(obj, size uintptr) {
if gcphase != _GCmarktermination {
throw("marking new object while not in mark termination phase")
}
}
heapBitsForAddr(obj).setMarked()
+ xadd64(&work.bytesMarked, int64(size))
}
// Checkmarking
type gcWork struct {
// Invariant: wbuf is never full or empty
wbuf wbufptr
+
+ // Bytes marked (blackened) on this gcWork. This is aggregated
+ // into work.bytesMarked by dispose.
+ bytesMarked uint64
}
// initFromCache fetches work from this M's currentwbuf cache.
putpartial(wbuf.ptr(), 167)
w.wbuf = 0
}
+ if w.bytesMarked != 0 {
+ // dispose happens relatively infrequently. If this
+ // atomic becomes a problem, we should first try to
+ // dispose less and if necessary aggregate in a per-P
+ // counter.
+ xadd64(&work.bytesMarked, int64(w.bytesMarked))
+ w.bytesMarked = 0
+ }
}
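// Sketch (not runtime code) of the per-P fallback the dispose comment above
// mentions: if the single atomic ever became a contention point, each P could
// own a slot in a sharded counter and the slots would only be summed once
// marking is done. The shard count and names are illustrative assumptions.
package main

import "fmt"

const numP = 4 // stands in for the number of Ps

// markedByP is written only by its owning P on the hot path, so no atomics
// are needed there; it is summed only after all marking has finished.
var markedByP [numP]uint64

func addMarked(p int, n uint64) { markedByP[p] += n }

func totalMarked() uint64 {
	var t uint64
	for _, v := range markedByP {
		t += v
	}
	return t
}

func main() {
	for p := 0; p < numP; p++ {
		addMarked(p, 100)
	}
	fmt.Println(totalMarked()) // 400
}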
// disposeToCache returns any cached pointers to this M's currentwbuf.
}
w.wbuf = 0
}
+ if w.bytesMarked != 0 {
+ xadd64(&work.bytesMarked, int64(w.bytesMarked))
+ w.bytesMarked = 0
+ }
}
// balance moves some work that's cached in this gcWork back on the