From dac936a4ab8490d90afb9786a69854818f634dc5 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Wed, 18 Sep 2019 15:33:17 +0000
Subject: [PATCH] runtime: make more page sweeper operations atomic

This change makes it so that allocation- and free-related page sweeper
metadata operations (e.g. pageInUse and pagesInUse) are atomic rather
than protected by the heap lock. This will help in reducing the length
of the critical path with the heap lock held in future changes.

Updates #35112.

Change-Id: Ie82bff024204dd17c4c671af63350a7a41add354
Reviewed-on: https://go-review.googlesource.com/c/go/+/196640
Run-TryBot: Michael Knyszek
TryBot-Result: Gobot Gobot
Reviewed-by: Austin Clements
---
 src/runtime/mgc.go   |  3 ++-
 src/runtime/mheap.go | 20 ++++++++++++--------
 2 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 0666099e02..0bc5568442 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -865,7 +865,8 @@ func gcSetTriggerRatio(triggerRatio float64) {
 		heapDistance = _PageSize
 	}
 	pagesSwept := atomic.Load64(&mheap_.pagesSwept)
-	sweepDistancePages := int64(mheap_.pagesInUse) - int64(pagesSwept)
+	pagesInUse := atomic.Load64(&mheap_.pagesInUse)
+	sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
 	if sweepDistancePages <= 0 {
 		mheap_.sweepPagesPerByte = 0
 	} else {
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 72702534d9..70e9f9284b 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -90,7 +90,7 @@ type mheap struct {
 	// accounting for current progress. If we could only adjust
 	// the slope, it would create a discontinuity in debt if any
 	// progress has already been made.
-	pagesInUse         uint64  // pages of spans in stats mSpanInUse; R/W with mheap.lock
+	pagesInUse         uint64  // pages of spans in stats mSpanInUse; updated atomically
 	pagesSwept         uint64  // pages swept this cycle; updated atomically
 	pagesSweptBasis    uint64  // pagesSwept to use as the origin of the sweep ratio; updated atomically
 	sweepHeapLiveBasis uint64  // value of heap_live to use as the origin of sweep ratio; written with lock, read without
@@ -238,7 +238,7 @@ type heapArena struct {
 	// but only the bit corresponding to the first page in each
 	// span is used.
 	//
-	// Writes are protected by mheap_.lock.
+	// Reads and writes are atomic.
 	pageInUse [pagesPerArena / 8]uint8
 
 	// pageMarks is a bitmap that indicates which spans have any
@@ -812,7 +812,7 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
 		// Scan this bitmap chunk for spans that are in-use
 		// but have no marked objects on them.
 		for i := range inUse {
-			inUseUnmarked := inUse[i] &^ marked[i]
+			inUseUnmarked := atomic.Load8(&inUse[i]) &^ marked[i]
 			if inUseUnmarked == 0 {
 				continue
 			}
@@ -831,7 +831,7 @@
 					// spans were freed when we dropped the
 					// lock and we don't want to get stale
 					// pointers from the spans array.
-					inUseUnmarked = inUse[i] &^ marked[i]
+					inUseUnmarked = atomic.Load8(&inUse[i]) &^ marked[i]
 				}
 			}
 		}
@@ -934,11 +934,15 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass) *mspan {
 		s.state.set(mSpanInUse)
 
 		// Mark in-use span in arena page bitmap.
+		//
+		// This publishes the span to the page sweeper, so
+		// it's imperative that the span be completely initialized
+		// prior to this line.
 		arena, pageIdx, pageMask := pageIndexOf(s.base())
-		arena.pageInUse[pageIdx] |= pageMask
+		atomic.Or8(&arena.pageInUse[pageIdx], pageMask)
 
 		// Update related page sweeper stats.
-		h.pagesInUse += uint64(npage)
+		atomic.Xadd64(&h.pagesInUse, int64(npage))
 	}
 	// heap_scan and heap_live were updated.
 	if gcBlackenEnabled != 0 {
@@ -1264,11 +1268,11 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
 			print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
 			throw("mheap.freeSpanLocked - invalid free")
 		}
-		h.pagesInUse -= uint64(s.npages)
+		atomic.Xadd64(&h.pagesInUse, -int64(s.npages))
 
 		// Clear in-use bit in arena page bitmap.
 		arena, pageIdx, pageMask := pageIndexOf(s.base())
-		arena.pageInUse[pageIdx] &^= pageMask
+		atomic.And8(&arena.pageInUse[pageIdx], ^pageMask)
 	default:
 		throw("mheap.freeSpanLocked - invalid span state")
 	}
-- 
2.50.0
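
Editor's note: the core pattern in this patch is setting a bitmap bit with
atomic.Or8 and clearing it with atomic.And8 of the mask's complement, paired
with an atomic counter (atomic.Xadd64 on pagesInUse). The sketch below is a
minimal standalone illustration of that pattern, not the runtime's code: the
pageBitmap type and its methods are hypothetical, and because sync/atomic
exposes no 8-bit operations, it works on uint32 words with a compare-and-swap
loop in place of the runtime-internal atomic.Or8/And8.

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// pageBitmap is a hypothetical stand-in for heapArena.pageInUse,
	// holding one in-use bit per page (128 pages here).
	type pageBitmap [4]uint32

	// set atomically marks page i in-use, mirroring
	// atomic.Or8(&arena.pageInUse[pageIdx], pageMask) in the patch.
	func (b *pageBitmap) set(i uint) {
		word := &b[i/32]
		mask := uint32(1) << (i % 32)
		for {
			old := atomic.LoadUint32(word)
			if atomic.CompareAndSwapUint32(word, old, old|mask) {
				return
			}
		}
	}

	// clear atomically marks page i free, mirroring
	// atomic.And8(&arena.pageInUse[pageIdx], ^pageMask).
	func (b *pageBitmap) clear(i uint) {
		word := &b[i/32]
		mask := uint32(1) << (i % 32)
		for {
			old := atomic.LoadUint32(word)
			if atomic.CompareAndSwapUint32(word, old, old&^mask) {
				return
			}
		}
	}

	// isSet atomically reads page i's bit, mirroring the
	// atomic.Load8(&inUse[i]) reads added to reclaimChunk.
	func (b *pageBitmap) isSet(i uint) bool {
		return atomic.LoadUint32(&b[i/32])&(uint32(1)<<(i%32)) != 0
	}

	func main() {
		var b pageBitmap
		var pagesInUse int64 // counterpart of mheap.pagesInUse

		b.set(5)
		atomic.AddInt64(&pagesInUse, 1) // like atomic.Xadd64 on alloc
		fmt.Println(b.isSet(5), atomic.LoadInt64(&pagesInUse)) // true 1

		b.clear(5)
		atomic.AddInt64(&pagesInUse, -1) // and on free
		fmt.Println(b.isSet(5), atomic.LoadInt64(&pagesInUse)) // false 0
	}

The CAS loop makes each read-modify-write atomic with respect to concurrent
setters and clearers, which is what the patch needs once pageInUse is written
outside the heap lock. Note also the ordering point called out in the alloc_m
hunk: the Or8 publishes the span to the concurrent sweeper, so the span must
be fully initialized before that store.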