runtime: make more page sweeper operations atomic
author     Michael Anthony Knyszek <mknyszek@google.com>
           Wed, 18 Sep 2019 15:33:17 +0000 (15:33 +0000)
committer  Michael Knyszek <mknyszek@google.com>
           Fri, 8 Nov 2019 17:00:57 +0000 (17:00 +0000)
This change makes allocation- and free-related page sweeper metadata
operations (e.g. updates to pageInUse and pagesInUse) atomic rather than
protected by the heap lock. This will help reduce the length of the
critical path executed with the heap lock held in future changes.

Updates #35112.

Change-Id: Ie82bff024204dd17c4c671af63350a7a41add354
Reviewed-on: https://go-review.googlesource.com/c/go/+/196640
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
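
For readers unfamiliar with the pattern, the diffs below replace lock-protected
read-modify-write sequences on a page bitmap and a page counter with atomic
operations. The following is a minimal standalone sketch of that shape using
only sync/atomic; the runtime itself uses its internal atomic package, which
provides 8-bit Or8/And8 and Xadd64, so all names and sizes here are
hypothetical illustrations rather than the runtime's code.

package main

import (
	"fmt"
	"sync/atomic"
)

// arena holds one bit per page; only the bit for a span's first page is
// used, mirroring the pageInUse bitmap changed below.
type arena struct {
	pageInUse [256]uint32
}

// heap tracks how many pages are in in-use spans.
type heap struct {
	pagesInUse uint64 // read and written only with atomic operations
}

// atomicOr32 emulates an atomic OR with a CAS loop, standing in for the
// runtime's internal atomic.Or8.
func atomicOr32(addr *uint32, mask uint32) {
	for {
		old := atomic.LoadUint32(addr)
		if atomic.CompareAndSwapUint32(addr, old, old|mask) {
			return
		}
	}
}

// atomicAndNot32 clears the mask bits, standing in for atomic.And8(addr, ^mask).
func atomicAndNot32(addr *uint32, mask uint32) {
	for {
		old := atomic.LoadUint32(addr)
		if atomic.CompareAndSwapUint32(addr, old, old&^mask) {
			return
		}
	}
}

// markInUse publishes a span's first page and bumps the page count,
// without requiring a heap lock for either update.
func (h *heap) markInUse(a *arena, firstPage, npages uint32) {
	atomicOr32(&a.pageInUse[firstPage/32], 1<<(firstPage%32))
	atomic.AddUint64(&h.pagesInUse, uint64(npages))
}

// clearInUse is the free-path counterpart.
func (h *heap) clearInUse(a *arena, firstPage, npages uint32) {
	atomicAndNot32(&a.pageInUse[firstPage/32], 1<<(firstPage%32))
	atomic.AddUint64(&h.pagesInUse, ^uint64(npages-1)) // subtract npages
}

func main() {
	var h heap
	var a arena
	h.markInUse(&a, 5, 4)
	fmt.Println(atomic.LoadUint64(&h.pagesInUse)) // 4
	h.clearInUse(&a, 5, 4)
	fmt.Println(atomic.LoadUint64(&h.pagesInUse)) // 0
}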
src/runtime/mgc.go
src/runtime/mheap.go

diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 0666099e020599f9acc03c98c171e512f7bfecbb..0bc55684420d213a3e5c233245f2ac292977a5aa 100644
@@ -865,7 +865,8 @@ func gcSetTriggerRatio(triggerRatio float64) {
                        heapDistance = _PageSize
                }
                pagesSwept := atomic.Load64(&mheap_.pagesSwept)
-               sweepDistancePages := int64(mheap_.pagesInUse) - int64(pagesSwept)
+               pagesInUse := atomic.Load64(&mheap_.pagesInUse)
+               sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
                if sweepDistancePages <= 0 {
                        mheap_.sweepPagesPerByte = 0
                } else {
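
For context, this hunk is in the sweep pacing code: the sweeper must retire
roughly pagesInUse - pagesSwept pages before heapDistance more bytes are
allocated, and the quotient becomes the pages-per-byte sweep ratio. A
simplified, hedged sketch of that calculation (not the full gcSetTriggerRatio
logic, and the function name is invented for illustration):

// sweepPagesPerByte returns how many pages must be swept per byte of
// allocation so that sweeping the in-use heap finishes before the next
// GC trigger, mirroring the computation in the hunk above.
func sweepPagesPerByte(pagesInUse, pagesSwept uint64, heapDistance int64) float64 {
	sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
	if sweepDistancePages <= 0 {
		return 0
	}
	return float64(sweepDistancePages) / float64(heapDistance)
}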
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 72702534d98ed6e5256918be1a89d8deb410d19a..70e9f9284be4f720b0d0ad7c891b50c3ca3728a2 100644
@@ -90,7 +90,7 @@ type mheap struct {
        // accounting for current progress. If we could only adjust
        // the slope, it would create a discontinuity in debt if any
        // progress has already been made.
-       pagesInUse         uint64  // pages of spans in stats mSpanInUse; R/W with mheap.lock
+       pagesInUse         uint64  // pages of spans in stats mSpanInUse; updated atomically
        pagesSwept         uint64  // pages swept this cycle; updated atomically
        pagesSweptBasis    uint64  // pagesSwept to use as the origin of the sweep ratio; updated atomically
        sweepHeapLiveBasis uint64  // value of heap_live to use as the origin of sweep ratio; written with lock, read without
@@ -238,7 +238,7 @@ type heapArena struct {
        // but only the bit corresponding to the first page in each
        // span is used.
        //
-       // Writes are protected by mheap_.lock.
+       // Reads and writes are atomic.
        pageInUse [pagesPerArena / 8]uint8
 
        // pageMarks is a bitmap that indicates which spans have any
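
As a rough illustration of how a one-bit-per-page bitmap like pageInUse is
addressed: a page index within an arena selects a byte and a bit within that
byte. This helper is hypothetical, not the runtime's pageIndexOf:

// pageBit maps a page index within an arena to the byte index and bit
// mask used with a [pagesPerArena / 8]uint8 bitmap such as pageInUse.
func pageBit(pageIdx uintptr) (byteIdx uintptr, mask uint8) {
	return pageIdx / 8, uint8(1) << (pageIdx % 8)
}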
@@ -812,7 +812,7 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
                // Scan this bitmap chunk for spans that are in-use
                // but have no marked objects on them.
                for i := range inUse {
-                       inUseUnmarked := inUse[i] &^ marked[i]
+                       inUseUnmarked := atomic.Load8(&inUse[i]) &^ marked[i]
                        if inUseUnmarked == 0 {
                                continue
                        }
@@ -831,7 +831,7 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
                                                // spans were freed when we dropped the
                                                // lock and we don't want to get stale
                                                // pointers from the spans array.
-                                               inUseUnmarked = inUse[i] &^ marked[i]
+                                               inUseUnmarked = atomic.Load8(&inUse[i]) &^ marked[i]
                                        }
                                }
                        }
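
The &^ (AND NOT) in this hunk keeps the bits that are set in pageInUse but
clear in pageMarks, i.e. in-use spans with no marked objects, which are the
ones eligible for reclaiming. A small illustration of the filter on plain byte
slices (hypothetical helper):

// unmarkedInUse returns, byte by byte, the bits set in inUse but clear
// in marked, the same inUse[i] &^ marked[i] filter used above.
func unmarkedInUse(inUse, marked []uint8) []uint8 {
	out := make([]uint8, len(inUse))
	for i := range inUse {
		out[i] = inUse[i] &^ marked[i]
	}
	return out
}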
@@ -934,11 +934,15 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass) *mspan {
                s.state.set(mSpanInUse)
 
                // Mark in-use span in arena page bitmap.
+               //
+               // This publishes the span to the page sweeper, so
+               // it's imperative that the span be completely initialized
+               // prior to this line.
                arena, pageIdx, pageMask := pageIndexOf(s.base())
-               arena.pageInUse[pageIdx] |= pageMask
+               atomic.Or8(&arena.pageInUse[pageIdx], pageMask)
 
                // Update related page sweeper stats.
-               h.pagesInUse += uint64(npage)
+               atomic.Xadd64(&h.pagesInUse, int64(npage))
        }
        // heap_scan and heap_live were updated.
        if gcBlackenEnabled != 0 {
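
The comment added in this hunk describes a publication pattern: every span
field is initialized first, and only then does a single atomic bit-set make
the span visible to the concurrent sweeper, whose atomic load of the same
bitmap byte pairs with it. A hedged standalone sketch of that ordering with
sync/atomic follows (fragment; assumes sync/atomic is imported, and the types
are invented rather than the runtime's span machinery):

// span stands in for a fully initialized object that must be published
// to a concurrent reader only after all of its fields are written.
type span struct {
	base, npages uintptr
}

var (
	spans     [64]*span
	published [64 / 32]uint32 // bit i set once spans[i] is initialized
)

// publish writes the span first, then sets its bit with an atomic OR
// (via CAS); a reader that observes the bit through an atomic load is
// guaranteed by the Go memory model to also observe the span's fields.
func publish(i int, s *span) {
	spans[i] = s // complete initialization before the atomic bit-set
	word, mask := i/32, uint32(1)<<(i%32)
	for {
		old := atomic.LoadUint32(&published[word])
		if atomic.CompareAndSwapUint32(&published[word], old, old|mask) {
			return
		}
	}
}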
@@ -1264,11 +1268,11 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
                        print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
                        throw("mheap.freeSpanLocked - invalid free")
                }
-               h.pagesInUse -= uint64(s.npages)
+               atomic.Xadd64(&h.pagesInUse, -int64(s.npages))
 
                // Clear in-use bit in arena page bitmap.
                arena, pageIdx, pageMask := pageIndexOf(s.base())
-               arena.pageInUse[pageIdx] &^= pageMask
+               atomic.And8(&arena.pageInUse[pageIdx], ^pageMask)
        default:
                throw("mheap.freeSpanLocked - invalid span state")
        }