runtime: update and access scavengeGoal atomically
author Michael Anthony Knyszek <mknyszek@google.com>
Mon, 4 Oct 2021 19:52:48 +0000 (19:52 +0000)
committer Michael Knyszek <mknyszek@google.com>
Thu, 4 Nov 2021 20:01:11 +0000 (20:01 +0000)
This is the first step toward acquiring the heap lock less
frequently in the scavenger.

Change-Id: Idc69fd8602be2c83268c155951230d60e20b42fe
Reviewed-on: https://go-review.googlesource.com/c/go/+/353973
Trust: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
src/runtime/mgcscavenge.go
src/runtime/mheap.go

diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index fb9b5c86943849b6299e3ebb08cef6658bea68f3..4edeb8739e7e3d9009cbc026f527e21a9406b9f2 100644
@@ -125,7 +125,7 @@ func gcPaceScavenger(heapGoal, lastHeapGoal uint64) {
        // information about the heap yet) so this is fine, and avoids a fault
        // or garbage data later.
        if lastHeapGoal == 0 {
-               mheap_.scavengeGoal = ^uint64(0)
+               atomic.Store64(&mheap_.scavengeGoal, ^uint64(0))
                return
        }
        // Compute our scavenging goal.
@@ -157,10 +157,10 @@ func gcPaceScavenger(heapGoal, lastHeapGoal uint64) {
        // the background scavenger. We disable the background scavenger if there's
        // less than one physical page of work to do because it's not worth it.
        if retainedNow <= retainedGoal || retainedNow-retainedGoal < uint64(physPageSize) {
-               mheap_.scavengeGoal = ^uint64(0)
+               atomic.Store64(&mheap_.scavengeGoal, ^uint64(0))
                return
        }
-       mheap_.scavengeGoal = retainedGoal
+       atomic.Store64(&mheap_.scavengeGoal, retainedGoal)
 }
 
 // Sleep/wait state of the background scavenger.
@@ -299,7 +299,7 @@ func bgscavenge(c chan int) {
                        lock(&mheap_.lock)
 
                        // If background scavenging is disabled or if there's no work to do just park.
-                       retained, goal := heapRetained(), mheap_.scavengeGoal
+                       retained, goal := heapRetained(), atomic.Load64(&mheap_.scavengeGoal)
                        if retained <= goal {
                                unlock(&mheap_.lock)
                                return
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 057ab06b1d5d08c940401c9e5c46241f27667283..f2f6e7f4cf7f154d4f5dc561ce4defbd0f334a10 100644
@@ -111,6 +111,8 @@ type mheap struct {
        // scavengeGoal is the amount of total retained heap memory (measured by
        // heapRetained) that the runtime will try to maintain by returning memory
        // to the OS.
+       //
+       // Accessed atomically.
        scavengeGoal uint64
 
        // Page reclaimer state
@@ -1399,9 +1401,10 @@ func (h *mheap) grow(npage uintptr) bool {
        // By scavenging inline we deal with the failure to allocate out of
        // memory fragments by scavenging the memory fragments that are least
        // likely to be re-used.
-       if retained := heapRetained(); retained+uint64(totalGrowth) > h.scavengeGoal {
+       scavengeGoal := atomic.Load64(&h.scavengeGoal)
+       if retained := heapRetained(); retained+uint64(totalGrowth) > scavengeGoal {
                todo := totalGrowth
-               if overage := uintptr(retained + uint64(totalGrowth) - h.scavengeGoal); todo > overage {
+               if overage := uintptr(retained + uint64(totalGrowth) - scavengeGoal); todo > overage {
                        todo = overage
                }
                h.pages.scavenge(todo, false)