From: Michael Anthony Knyszek
Date: Thu, 12 Sep 2019 18:24:56 +0000 (+0000)
Subject: runtime: add option to scavenge with lock held throughout
X-Git-Tag: go1.14beta1~347
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=e5ce13c178cc0be72ca220b4c3f0c95f570c19ab;p=gostls13.git

runtime: add option to scavenge with lock held throughout

This change adds a "locked" parameter to scavenge() and scavengeOne()
which allows these methods to be run with the heap lock acquired, and
synchronously with respect to other operations that acquire the heap
lock. This mode is necessary for both heap-growth scavenging (multiple
asynchronous scavengers here could be problematic) and
debug.FreeOSMemory.

Updates #35112.

Change-Id: I24eea8e40f971760999c980981893676b4c9b666
Reviewed-on: https://go-review.googlesource.com/c/go/+/195699
Reviewed-by: Austin Clements
Reviewed-by: Keith Randall
---

diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 435b330fe0..10066115b4 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -866,9 +866,9 @@ func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
 	return (*PallocData)(&((*pageAlloc)(p).chunks[i]))
 }
-func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
+func (p *PageAlloc) Scavenge(nbytes uintptr, locked bool) (r uintptr) {
 	systemstack(func() {
-		r = (*pageAlloc)(p).scavenge(nbytes)
+		r = (*pageAlloc)(p).scavenge(nbytes, locked)
 	})
 	return
 }
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index f716210fef..aeab2d60e0 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -421,16 +421,17 @@ func bgscavenge(c chan int) {
 //
 // Returns the amount of memory scavenged in bytes.
 //
-// s.mheapLock must not be locked.
+// If locked == false, s.mheapLock must not be locked. If locked == true,
+// s.mheapLock must be locked.
 //
 // Must run on the system stack because scavengeOne must run on the
 // system stack.
 //
 //go:systemstack
-func (s *pageAlloc) scavenge(nbytes uintptr) uintptr {
+func (s *pageAlloc) scavenge(nbytes uintptr, locked bool) uintptr {
 	released := uintptr(0)
 	for released < nbytes {
-		r := s.scavengeOne(nbytes - released)
+		r := s.scavengeOne(nbytes-released, locked)
 		if r == 0 {
 			// Nothing left to scavenge! Give up.
 			break
@@ -457,11 +458,14 @@ func (s *pageAlloc) resetScavengeAddr() {
 //
 // Should it exhaust the heap, it will return 0 and set s.scavAddr to minScavAddr.
 //
-// s.mheapLock must not be locked. Must be run on the system stack because it
-// acquires the heap lock.
+// If locked == false, s.mheapLock must not be locked.
+// If locked == true, s.mheapLock must be locked.
+//
+// Must be run on the system stack because it either acquires the heap lock
+// or executes with the heap lock acquired.
 //
 //go:systemstack
-func (s *pageAlloc) scavengeOne(max uintptr) uintptr {
+func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
 	// Calculate the maximum number of pages to scavenge.
 	//
 	// This should be alignUp(max, pageSize) / pageSize but max can and will
@@ -483,10 +487,22 @@ func (s *pageAlloc) scavengeOne(max uintptr) uintptr {
 		minPages = 1
 	}
 
-	lock(s.mheapLock)
+	// Helpers for locking and unlocking only if locked == false.
+	lockHeap := func() {
+		if !locked {
+			lock(s.mheapLock)
+		}
+	}
+	unlockHeap := func() {
+		if !locked {
+			unlock(s.mheapLock)
+		}
+	}
+
+	lockHeap()
 	top := chunkIndex(s.scavAddr)
 	if top < s.start {
-		unlock(s.mheapLock)
+		unlockHeap()
 		return 0
 	}

@@ -498,10 +514,10 @@ func (s *pageAlloc) scavengeOne(max uintptr) uintptr {
 	// If we found something, scavenge it and return!
 	if npages != 0 {
 		s.scavengeRangeLocked(ci, base, npages)
-		unlock(s.mheapLock)
+		unlockHeap()
 		return uintptr(npages) * pageSize
 	}
-	unlock(s.mheapLock)
+	unlockHeap()

 	// Slow path: iterate optimistically looking for any free and unscavenged page.
 	// If we think we see something, stop and verify it!
@@ -528,7 +544,7 @@ func (s *pageAlloc) scavengeOne(max uintptr) uintptr {
 		}

 		// We found a candidate, so let's lock and verify it.
-		lock(s.mheapLock)
+		lockHeap()

 		// Find, verify, and scavenge if we can.
 		chunk := &s.chunks[i]
@@ -536,7 +552,7 @@ func (s *pageAlloc) scavengeOne(max uintptr) uintptr {
 		if npages > 0 {
 			// We found memory to scavenge! Mark the bits and report that up.
 			s.scavengeRangeLocked(i, base, npages)
-			unlock(s.mheapLock)
+			unlockHeap()
 			return uintptr(npages) * pageSize
 		}

@@ -544,14 +560,14 @@ func (s *pageAlloc) scavengeOne(max uintptr) uintptr {
 		// all the way down to where we searched as scavenged for future calls
 		// and keep iterating.
 		s.scavAddr = chunkBase(i-1) + pallocChunkPages*pageSize - 1
-		unlock(s.mheapLock)
+		unlockHeap()
 	}

-	lock(s.mheapLock)
+	lockHeap()
 	// We couldn't find anything, so signal that there's nothing left
 	// to scavenge.
 	s.scavAddr = minScavAddr
-	unlock(s.mheapLock)
+	unlockHeap()

 	return 0
 }
diff --git a/src/runtime/mgcscavenge_test.go b/src/runtime/mgcscavenge_test.go
index 3e77ee8f81..74fcfe7a0d 100644
--- a/src/runtime/mgcscavenge_test.go
+++ b/src/runtime/mgcscavenge_test.go
@@ -364,12 +364,12 @@ func TestPageAllocScavenge(t *testing.T) {
 	}
 	for name, v := range tests {
 		v := v
-		t.Run(name, func(t *testing.T) {
+		runTest := func(t *testing.T, locked bool) {
 			b := NewPageAlloc(v.beforeAlloc, v.beforeScav)
 			defer FreePageAlloc(b)

 			for iter, h := range v.expect {
-				if got := b.Scavenge(h.request); got != h.expect {
+				if got := b.Scavenge(h.request, locked); got != h.expect {
 					t.Fatalf("bad scavenge #%d: want %d, got %d", iter+1, h.expect, got)
 				}
 			}
@@ -377,6 +377,12 @@ func TestPageAllocScavenge(t *testing.T) {
 			want := NewPageAlloc(v.afterAlloc, v.afterScav)
 			defer FreePageAlloc(want)
 			checkPageAlloc(t, want, b)
+		}
+		t.Run(name, func(t *testing.T) {
+			runTest(t, false)
+		})
+		t.Run(name+"Locked", func(t *testing.T) {
+			runTest(t, true)
 		})
 	}
 }
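The heart of the change is the pair of closures in scavengeOne that acquire
and release the heap lock only when the caller does not already hold it. The
following standalone sketch shows the same pattern outside the runtime, using
sync.Mutex in place of the runtime's internal lock/unlock primitives; the
names condLocker and scavengeSome are hypothetical and exist only for
illustration.

package main

import (
	"fmt"
	"sync"
)

// condLocker pairs a mutex with a flag saying whether the caller already
// holds it, mirroring the locked parameter above.
type condLocker struct {
	mu     *sync.Mutex
	locked bool // true if the caller already holds mu
}

// lock acquires mu only if the caller does not already hold it,
// like the lockHeap helper in scavengeOne.
func (c condLocker) lock() {
	if !c.locked {
		c.mu.Lock()
	}
}

// unlock releases mu only if lock acquired it.
func (c condLocker) unlock() {
	if !c.locked {
		c.mu.Unlock()
	}
}

// scavengeSome stands in for scavengeOne: it may be called with mu held
// (locked == true) or not (locked == false), and its critical sections
// are correct either way. It returns how many critical sections ran.
func scavengeSome(mu *sync.Mutex, locked bool) int {
	l := condLocker{mu: mu, locked: locked}
	sections := 0

	// First critical section: e.g. check the fast path.
	l.lock()
	sections++
	l.unlock()

	// When locked == false the lock is genuinely dropped here between
	// critical sections, just as scavengeOne drops it on its slow path.

	// Second critical section: e.g. verify a candidate and update state.
	l.lock()
	sections++
	l.unlock()

	return sections
}

func main() {
	var mu sync.Mutex

	// Asynchronous caller (like the background scavenger): does not hold
	// the lock, so each critical section locks individually.
	n := scavengeSome(&mu, false)

	// Synchronous caller (like debug.FreeOSMemory): holds the lock across
	// the whole call, so the helpers become no-ops.
	mu.Lock()
	n += scavengeSome(&mu, true)
	mu.Unlock()

	fmt.Println("critical sections run:", n)
}

With locked == true the whole call is one critical section, which is what
debug.FreeOSMemory and heap-growth scavenging need; with locked == false the
lock can be released between candidates, so a background scavenger never
holds the heap lock for long stretches.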
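The test change applies a complementary pattern: the body of the original
subtest is hoisted into a closure parameterized by the new flag and
registered twice under t.Run, so every table case runs both unlocked and
locked. A minimal sketch of that shape, with hypothetical names (TestModes,
pages):

package demo

import "testing"

func TestModes(t *testing.T) {
	tests := map[string]int{"Small": 1, "Large": 64}
	for name, pages := range tests {
		pages := pages // capture the range variable, as v := v does above
		runTest := func(t *testing.T, locked bool) {
			// The mode under test would change setup and assertions here.
			if pages <= 0 {
				t.Fatalf("locked=%v: bad page count %d", locked, pages)
			}
		}
		t.Run(name, func(t *testing.T) { runTest(t, false) })
		t.Run(name+"Locked", func(t *testing.T) { runTest(t, true) })
	}
}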