// operations (all the *64 operations in internal/runtime/atomic).
var AtomicFields = []uintptr{
unsafe.Offsetof(m{}.procid),
- unsafe.Offsetof(p{}.gcFractionalMarkTime),
unsafe.Offsetof(profBuf{}.overflow),
unsafe.Offsetof(profBuf{}.overflowTime),
unsafe.Offsetof(heapStatsDelta{}.tinyAllocCount),
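The offset entry for p.gcFractionalMarkTime drops out of AtomicFields here because the field no longer goes through the *64 helpers this list tracks, and atomic.Int64 guarantees 8-byte alignment by construction. For reference, a minimal sketch of the alignment check this list feeds, assuming a loop in the style of the runtime's TestAtomicAlignment (the error message is illustrative):

    // Every offset listed in AtomicFields must be 8-byte aligned so
    // that 64-bit atomic operations work on 32-bit platforms.
    for _, off := range AtomicFields {
        if off%8 != 0 {
            t.Errorf("offset %d is not 8-byte aligned", off)
        }
    }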
return true
}
p := getg().m.p.ptr()
- selfTime := p.gcFractionalMarkTime + (now - p.gcMarkWorkerStartTime)
+ selfTime := p.gcFractionalMarkTime.Load() + (now - p.gcMarkWorkerStartTime)
// Add some slack to the utilization goal so that the
// fractional worker isn't behind again the instant it exits.
return float64(selfTime)/float64(delta) > 1.2*gcController.fractionalUtilizationGoal
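For concreteness: if fractionalUtilizationGoal were 0.05 (an illustrative value) and delta were 100ms, the worker would exit once selfTime/delta > 1.2 * 0.05 = 0.06, i.e. after more than 6ms of mark work. The 1.2 slack factor keeps the worker from being behind the goal, and thus immediately rescheduled, the moment it yields.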
pp.limiterEvent.stop(limiterEventIdleMarkWork, now)
}
if pp.gcMarkWorkerMode == gcMarkWorkerFractionalMode {
- atomic.Xaddint64(&pp.gcFractionalMarkTime, duration)
+ pp.gcFractionalMarkTime.Add(duration)
}
// We'll releasem after this point and thus this P may run
// Clear per-P state
for _, p := range allp {
p.gcAssistTime = 0
- p.gcFractionalMarkTime = 0
+ p.gcFractionalMarkTime.Store(0)
}
if trigger.kind == gcTriggerTime {
//
// This should be kept in sync with pollFractionalWorkerExit.
delta := now - c.markStartTime
- if delta > 0 && float64(pp.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
+ if delta > 0 && float64(pp.gcFractionalMarkTime.Load())/float64(delta) > c.fractionalUtilizationGoal {
// Nope. No need to run a fractional worker.
gcBgMarkWorkerPool.push(&node.node)
return nil, now
// Per-P GC state
gcAssistTime int64 // Nanoseconds in assistAlloc
- gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic)
+ gcFractionalMarkTime atomic.Int64 // Nanoseconds in fractional mark worker
// limiterEvent tracks events for the GC CPU limiter.
limiterEvent limiterEvent
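Taken together, these hunks replace raw atomic helpers and plain accesses on an int64 field with the method set of atomic.Int64. Below is a minimal runnable sketch of the same pattern using the public sync/atomic package, which mirrors internal/runtime/atomic here; the worker type and markTime field are illustrative stand-ins, not runtime APIs:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // worker stands in for the runtime's per-P state.
    type worker struct {
        // The type now carries the atomicity requirement (and the
        // 8-byte alignment), so no "(atomic)" comment is needed.
        markTime atomic.Int64 // nanoseconds in fractional mark work
    }

    func main() {
        var w worker
        w.markTime.Add(500)    // was atomic.Xaddint64(&w.markTime, 500)
        t := w.markTime.Load() // was a plain read of the field
        w.markTime.Store(0)    // was a plain assignment: w.markTime = 0
        fmt.Println(t)         // prints 500
    }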