}
// Compute the heap distance remaining.
- heapDistance := int64(memstats.next_gc) - int64(memstats.heap_live)
+ heapDistance := int64(memstats.next_gc) - int64(atomic.Load64(&memstats.heap_live))
if heapDistance <= 0 {
// This shouldn't happen, but if it does, avoid
// dividing by zero or setting the assist negative.
heapDistance = _PageSize
}
switch t.kind {
case gcTriggerHeap:
+ // Non-atomic access to heap_live for performance. If
+ // we are going to trigger on this, this thread just
+ // atomically wrote heap_live anyway and we'll see our
+ // own write.
return memstats.heap_live >= memstats.gc_trigger
case gcTriggerTime:
lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime))
now := nanotime()
work.stwprocs, work.maxprocs = gcprocs(), gomaxprocs
work.tSweepTerm = now
- work.heap0 = memstats.heap_live
+ work.heap0 = atomic.Load64(&memstats.heap_live)
work.pauseNS = 0
work.mode = mode
unlock(&allglock)
work.bytesMarked = 0
- work.initialHeapLive = memstats.heap_live
+ work.initialHeapLive = atomic.Load64(&memstats.heap_live)
work.markrootDone = false
}
// leads to a conservative GC rate rather than a GC rate that
// is potentially too low.
//
+ // Reads should likewise be atomic (or during STW).
+ //
// Whenever this is updated, call traceHeapAlloc() and
// gcController.revise().
heap_live uint64
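
The access pattern this patch establishes for heap_live (writers publish with atomic adds or during STW; readers outside STW use atomic loads so 64-bit reads cannot tear on 32-bit platforms) can be sketched outside the runtime with sync/atomic. This is a minimal illustration, not the runtime's code; liveBytes, alloc, and currentLive are hypothetical names standing in for memstats.heap_live and the code that updates and reads it.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// liveBytes plays the role of memstats.heap_live: a 64-bit counter
// read and written concurrently without a lock.
var liveBytes uint64

// alloc models a writer (e.g. a span refill) publishing newly live bytes.
func alloc(n uint64) {
	atomic.AddUint64(&liveBytes, n)
}

// currentLive models a reader that is not running during STW, so it
// must use an atomic load rather than a plain read of the field.
func currentLive() uint64 {
	return atomic.LoadUint64(&liveBytes)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				alloc(64)
			}
		}()
	}
	wg.Wait()
	fmt.Println(currentLive()) // 4 * 1000 * 64 = 256000
}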