// information about the heap yet) so this is fine, and avoids a fault
// or garbage data later.
if lastHeapGoal == 0 {
- mheap_.scavengeGoal = ^uint64(0)
+ atomic.Store64(&mheap_.scavengeGoal, ^uint64(0))
return
}
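
Storing ^uint64(0), the largest uint64, acts as a sentinel meaning "scavenging disabled": no measurement of retained memory can ever exceed it, so every retained <= goal check elsewhere trivially passes and the background scavenger simply parks. A minimal sketch of the pattern, using sync/atomic in place of the runtime-internal atomics; the names here are illustrative, not the runtime's.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // goalDisabled can never be exceeded by a real retained-memory
    // measurement, so it doubles as an "off switch" for the scavenger.
    const goalDisabled = ^uint64(0)

    var scavengeGoal uint64 // stands in for mheap_.scavengeGoal

    // hasWork mirrors the scavenger's "is there anything to do" check.
    func hasWork(retained uint64) bool {
        return retained > atomic.LoadUint64(&scavengeGoal)
    }

    func main() {
        atomic.StoreUint64(&scavengeGoal, goalDisabled)
        fmt.Println(hasWork(1 << 40)) // false: disabled, scavenger parks

        atomic.StoreUint64(&scavengeGoal, 64<<20)
        fmt.Println(hasWork(128 << 20)) // true: retained exceeds the goal
    }
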
// Compute our scavenging goal.
// If we're already below our goal, or within one page of our goal, then disable
// the background scavenger. We disable the background scavenger if there's
// less than one physical page of work to do because it's not worth it.
if retainedNow <= retainedGoal || retainedNow-retainedGoal < uint64(physPageSize) {
- mheap_.scavengeGoal = ^uint64(0)
+ atomic.Store64(&mheap_.scavengeGoal, ^uint64(0))
return
}
- mheap_.scavengeGoal = retainedGoal
+ atomic.Store64(&mheap_.scavengeGoal, retainedGoal)
}
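
The reason the plain assignments above become atomic.Store64: scavengeGoal is a 64-bit word read concurrently, for example by the heap-growth path shown further down, without always holding mheap_.lock, and on 32-bit platforms an unsynchronized 64-bit write can be observed half-updated (a torn read). Below is a minimal sketch of the store/load pairing, assuming sync/atomic as a stand-in for the runtime-internal atomic package; the goroutines and values are illustrative only.

    package main

    import (
        "sync"
        "sync/atomic"
    )

    var goal uint64

    func main() {
        var wg sync.WaitGroup
        wg.Add(2)

        // Writer: updates the goal once per "GC cycle", as
        // gcPaceScavenger does at the end of pacing.
        go func() {
            defer wg.Done()
            for i := uint64(1); i <= 1000; i++ {
                // The atomic store publishes both 32-bit halves
                // of the word at once.
                atomic.StoreUint64(&goal, i<<32|i)
            }
        }()

        // Reader: samples the goal with no lock held. With plain
        // loads/stores this pair is a data race (and a potential
        // torn read on 32-bit platforms); with atomics it is
        // well-defined.
        go func() {
            defer wg.Done()
            for i := 0; i < 1000; i++ {
                v := atomic.LoadUint64(&goal)
                // Both halves always match under atomic access.
                if v != 0 && v>>32 != v&0xffffffff {
                    panic("torn read")
                }
            }
        }()

        wg.Wait()
    }
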
// Sleep/wait state of the background scavenger.
lock(&mheap_.lock)
// If background scavenging is disabled or if there's no work to do, just park.
- retained, goal := heapRetained(), mheap_.scavengeGoal
+ retained, goal := heapRetained(), atomic.Load64(&mheap_.scavengeGoal)
if retained <= goal {
unlock(&mheap_.lock)
return
// scavengeGoal is the amount of total retained heap memory (measured by
// heapRetained) that the runtime will try to maintain by returning memory
// to the OS.
+ //
+ // Accessed atomically.
scavengeGoal uint64
// Page reclaimer state
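
A caveat that goes with the new "Accessed atomically." comment: the sync/atomic documentation notes that on 32-bit platforms it is the caller's responsibility to arrange 64-bit alignment of 64-bit words accessed atomically, and that the first word in a variable or in an allocated struct, array, or slice can be relied upon to be aligned. A small sketch of that rule follows; the pacer struct is hypothetical, not mheap's actual layout.

    package main

    import "sync/atomic"

    type pacer struct {
        goal uint64 // first field: 64-bit aligned even on 32-bit targets
        name string
    }

    func main() {
        p := new(pacer) // allocated struct: its first word is 64-bit aligned
        atomic.StoreUint64(&p.goal, 64<<20)
        _ = atomic.LoadUint64(&p.goal)
        _ = p.name
    }
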
// By scavenging inline we deal with the failure to allocate out of
// memory fragments by scavenging the memory fragments that are least
// likely to be re-used.
- if retained := heapRetained(); retained+uint64(totalGrowth) > h.scavengeGoal {
+ scavengeGoal := atomic.Load64(&h.scavengeGoal)
+ if retained := heapRetained(); retained+uint64(totalGrowth) > scavengeGoal {
todo := totalGrowth
- if overage := uintptr(retained + uint64(totalGrowth) - h.scavengeGoal); todo > overage {
+ if overage := uintptr(retained + uint64(totalGrowth) - scavengeGoal); todo > overage {
todo = overage
}
h.pages.scavenge(todo, false)
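
Hoisting the goal into the local scavengeGoal is not just cosmetic: the value is loaded once and used in both the comparison and the subtraction, so a concurrent store from gcPaceScavenger landing between the two lines cannot make retained + totalGrowth - goal wrap around. A minimal sketch of the snapshot-once pattern, with illustrative names:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    var goal uint64 = 100

    // overage reports how far retained+growth exceeds the goal.
    // Snapshotting the goal once keeps the check and the subtraction
    // consistent even if a concurrent writer stores a new goal
    // between the two statements.
    func overage(retained, growth uint64) uint64 {
        g := atomic.LoadUint64(&goal)
        if retained+growth > g {
            return retained + growth - g // cannot wrap: guarded above
        }
        return 0
    }

    func main() {
        fmt.Println(overage(90, 20)) // 10
    }
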