Cypherpunks repositories - gostls13.git/commitdiff
runtime: split gc_m into gcMark and gcSweep
author     Russ Cox <rsc@golang.org>
           Thu, 19 Feb 2015 21:43:27 +0000 (16:43 -0500)
committer  Russ Cox <rsc@golang.org>
           Fri, 20 Feb 2015 17:00:39 +0000 (17:00 +0000)
This is a nice split but more importantly it provides a better
way to fit the checkmark phase into the sequencing.

Also factor out common span copying into gcCopySpans.

Change-Id: Ia058644974e4ed4ac3cf4b017a3446eb2284d053
Reviewed-on: https://go-review.googlesource.com/5333
Reviewed-by: Austin Clements <austin@google.com>
src/runtime/mgc.go
src/runtime/mgcmark.go
src/runtime/mgcsweep.go
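
The sequencing this change introduces can be summarized with a small stand-alone sketch. This is not the runtime code itself: markPhase, sweepPhase and the plain int parameters are illustrative stand-ins for gcMark, gcSweep and the GC mode, but the control flow mirrors what gc() now runs inside the systemstack closure: one mark pass, an optional stop-the-world checkmark re-mark when debug.gccheckmark is set, then the sweep.

package main

import "fmt"

var useCheckmark = false // mirrors the flag renamed in mgcmark.go

func markPhase(start int64) { fmt.Println("mark, useCheckmark =", useCheckmark) }
func sweepPhase(mode int)   { fmt.Println("sweep, mode =", mode) }

func initCheckmarks()  { useCheckmark = true }
func clearCheckmarks() { useCheckmark = false }

// gcSequence mirrors the body of the systemstack closure in gc() after the split.
func gcSequence(startTime int64, mode int, gccheckmark int) {
	markPhase(startTime)
	if gccheckmark > 0 {
		// Re-run a full STW mark using checkmark bits to verify that
		// the earlier mark did not miss anything.
		initCheckmarks()
		markPhase(startTime)
		clearCheckmarks()
	}
	sweepPhase(mode)
}

func main() {
	gcSequence(0, 0, 1)
}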

index 4adc0f4fbe859abf756480614d8c4c3ff55f6a92..1c1248936ce5b33ca31ffb9cac0f274ad75f345c 100644 (file)
@@ -120,7 +120,6 @@ import "unsafe"
 
 const (
        _DebugGC         = 0
-       _DebugGCPtrs     = false // if true, print trace of every pointer load during GC
        _ConcurrentSweep = true
        _FinBlockSize    = 4 * 1024
        _RootData        = 0
@@ -357,41 +356,39 @@ func gc(mode int) {
        // TODO(rsc): Should the concurrent GC clear pools earlier?
        clearpools()
 
+       _g_ := getg()
+       _g_.m.traceback = 2
+       gp := _g_.m.curg
+       casgstatus(gp, _Grunning, _Gwaiting)
+       gp.waitreason = "garbage collection"
+
        // Run gc on the g0 stack.  We do this so that the g stack
        // we're currently running on will no longer change.  Cuts
        // the root set down a bit (g0 stacks are not scanned, and
        // we don't need to scan gc's internal state).  We also
        // need to switch to g0 so we can shrink the stack.
        systemstack(func() {
-               gc_m(startTime, mode == gcForceBlockMode)
-       })
-
-       systemstack(func() {
-               // Called from malloc.go using systemstack.
-               // The world is stopped. Rerun the scan and mark phases
-               // using the bitMarkedCheck bit instead of the
-               // bitMarked bit. If the marking encounters an
-               // bitMarked bit that is not set then we throw.
-               //go:nowritebarrier
-               if debug.gccheckmark == 0 {
-                       return
+               gcMark(startTime)
+               if debug.gccheckmark > 0 {
+                       // Run a full stop-the-world mark using checkmark bits,
+                       // to check that we didn't forget to mark anything during
+                       // the concurrent mark process.
+                       initCheckmarks()
+                       gcMark(startTime)
+                       clearCheckmarks()
                }
+               gcSweep(mode)
 
-               if checkmarkphase {
-                       throw("gccheckmark_m, entered with checkmarkphase already true")
+               if debug.gctrace > 1 {
+                       startTime = nanotime()
+                       finishsweep_m()
+                       gcMark(startTime)
+                       gcSweep(mode)
                }
-
-               checkmarkphase = true
-               initCheckmarks()
-               gc_m(startTime, mode == gcForceBlockMode) // turns off checkmarkphase + calls clearcheckmarkbits
        })
 
-       if debug.gctrace > 1 {
-               startTime = nanotime()
-               systemstack(func() {
-                       gc_m(startTime, mode == gcForceBlockMode)
-               })
-       }
+       _g_.m.traceback = 0
+       casgstatus(gp, _Gwaiting, _Grunning)
 
        if trace.enabled {
                traceGCDone()
@@ -427,56 +424,24 @@ func gc(mode int) {
        }
 }
 
+// gcMark runs the mark (or, for concurrent GC, mark termination)
 // STW is in effect at this point.
 //TODO go:nowritebarrier
-func gc_m(start_time int64, eagersweep bool) {
-       if _DebugGCPtrs {
-               print("GC start\n")
-       }
-
-       _g_ := getg()
-       gp := _g_.m.curg
-       casgstatus(gp, _Grunning, _Gwaiting)
-       gp.waitreason = "garbage collection"
-
-       gcphase = _GCmarktermination
+func gcMark(start_time int64) {
        if debug.allocfreetrace > 0 {
                tracegc()
        }
 
-       _g_.m.traceback = 2
        t0 := start_time
        work.tstart = start_time
+       gcphase = _GCmarktermination
 
        var t1 int64
        if debug.gctrace > 0 {
                t1 = nanotime()
        }
 
-       if !checkmarkphase {
-               // TODO(austin) This is a noop beceause we should
-               // already have swept everything to the current
-               // sweepgen.
-               finishsweep_m() // skip during checkmark debug phase.
-       }
-
-       // Cache runtime.mheap_.allspans in work.spans to avoid conflicts with
-       // resizing/freeing allspans.
-       // New spans can be created while GC progresses, but they are not garbage for
-       // this round:
-       //  - new stack spans can be created even while the world is stopped.
-       //  - new malloc spans can be created during the concurrent sweep
-
-       // Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
-       lock(&mheap_.lock)
-       // Free the old cached sweep array if necessary.
-       if work.spans != nil && &work.spans[0] != &h_allspans[0] {
-               sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
-       }
-       // Cache the current array for marking.
-       mheap_.gcspans = mheap_.allspans
-       work.spans = h_allspans
-       unlock(&mheap_.lock)
+       gcCopySpans()
 
        work.nwait = 0
        work.ndone = 0
@@ -584,60 +549,59 @@ func gc_m(start_time int64, eagersweep bool) {
                sweep.nbgsweep = 0
                sweep.npausesweep = 0
        }
+}
 
-       if debug.gccheckmark > 0 {
-               if !checkmarkphase {
-                       // first half of two-pass; don't set up sweep
-                       casgstatus(gp, _Gwaiting, _Grunning)
-                       return
-               }
-               checkmarkphase = false // done checking marks
-               clearCheckmarks()
-       }
+func gcSweep(mode int) {
+       gcCopySpans()
 
-       // See the comment in the beginning of this function as to why we need the following.
-       // Even if this is still stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
        lock(&mheap_.lock)
-       // Free the old cached mark array if necessary.
-       if work.spans != nil && &work.spans[0] != &h_allspans[0] {
-               sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
-       }
-
-       // Cache the current array for sweeping.
-       mheap_.gcspans = mheap_.allspans
        mheap_.sweepgen += 2
        mheap_.sweepdone = 0
-       work.spans = h_allspans
        sweep.spanidx = 0
        unlock(&mheap_.lock)
 
-       if _ConcurrentSweep && !eagersweep {
-               lock(&gclock)
-               if !sweep.started {
-                       go bgsweep()
-                       sweep.started = true
-               } else if sweep.parked {
-                       sweep.parked = false
-                       ready(sweep.g)
-               }
-               unlock(&gclock)
-       } else {
+       if !_ConcurrentSweep || mode == gcForceBlockMode {
+               // Special case synchronous sweep.
                // Sweep all spans eagerly.
                for sweepone() != ^uintptr(0) {
                        sweep.npausesweep++
                }
                // Do an additional mProf_GC, because all 'free' events are now real as well.
                mProf_GC()
+               mProf_GC()
+               return
        }
 
+       // Background sweep.
+       lock(&sweep.lock)
+       if !sweep.started {
+               go bgsweep()
+               sweep.started = true
+       } else if sweep.parked {
+               sweep.parked = false
+               ready(sweep.g)
+       }
+       unlock(&sweep.lock)
        mProf_GC()
-       _g_.m.traceback = 0
+}
 
-       if _DebugGCPtrs {
-               print("GC end\n")
+func gcCopySpans() {
+       // Cache runtime.mheap_.allspans in work.spans to avoid conflicts with
+       // resizing/freeing allspans.
+       // New spans can be created while GC progresses, but they are not garbage for
+       // this round:
+       //  - new stack spans can be created even while the world is stopped.
+       //  - new malloc spans can be created during the concurrent sweep
+       // Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
+       lock(&mheap_.lock)
+       // Free the old cached mark array if necessary.
+       if work.spans != nil && &work.spans[0] != &h_allspans[0] {
+               sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
        }
-
-       casgstatus(gp, _Gwaiting, _Grunning)
+       // Cache the current array for sweeping.
+       mheap_.gcspans = mheap_.allspans
+       work.spans = h_allspans
+       unlock(&mheap_.lock)
 }
 
 // Hooks for other packages
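
The gcCopySpans helper above factors out the span-array caching that gc_m previously did twice, once before marking and once before sweeping, into one function that takes mheap_.lock itself, so gcMark and gcSweep can each call it independently. A rough stand-alone illustration of that factoring pattern, with hypothetical names (heapLock, allSpans, cachedSpans) in place of the real heap structures:

package main

import (
	"fmt"
	"sync"
)

var (
	heapLock    sync.Mutex
	allSpans    = []int{10, 20, 30} // stands in for h_allspans
	cachedSpans []int               // stands in for work.spans
)

// copySpans snapshots the current span list for this GC round. Both the mark
// and sweep entry points call it, so it takes the lock itself and a second
// call simply refreshes the snapshot.
func copySpans() {
	heapLock.Lock()
	cachedSpans = append(cachedSpans[:0], allSpans...)
	heapLock.Unlock()
}

func mark()  { copySpans(); fmt.Println("mark over", len(cachedSpans), "spans") }
func sweep() { copySpans(); fmt.Println("sweep over", len(cachedSpans), "spans") }

func main() {
	mark()
	sweep()
}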
index d790af34054c70459d53b823002be227cbf99b0a..1d6c1e8e22fadfba7413ce8d8559766aa504c6dc 100644 (file)
@@ -86,7 +86,7 @@ func markroot(desc *parfor, i uint32) {
                        if s.state != mSpanInUse {
                                continue
                        }
-                       if !checkmarkphase && s.sweepgen != sg {
+                       if !useCheckmark && s.sweepgen != sg {
                                // sweepgen was updated (+2) during non-checkmark GC pass
                                print("sweep ", s.sweepgen, " ", sg, "\n")
                                throw("gc: unswept span")
@@ -458,7 +458,7 @@ func scanobject(b, n uintptr, ptrmask *uint8, gcw *gcWorkProducer) {
                }
 
                if bits&typePointer != typePointer {
-                       print("gc checkmarkphase=", checkmarkphase, " b=", hex(b), " ptrmask=", ptrmask, "\n")
+                       print("gc useCheckmark=", useCheckmark, " b=", hex(b), " ptrmask=", ptrmask, "\n")
                        throw("unexpected garbage collection bits")
                }
 
@@ -470,7 +470,7 @@ func scanobject(b, n uintptr, ptrmask *uint8, gcw *gcWorkProducer) {
                        continue
                }
 
-               if mheap_.shadow_enabled && debug.wbshadow >= 2 && debug.gccheckmark > 0 && checkmarkphase {
+               if mheap_.shadow_enabled && debug.wbshadow >= 2 && debug.gccheckmark > 0 && useCheckmark {
                        checkwbshadow((*uintptr)(unsafe.Pointer(b + i)))
                }
 
@@ -528,7 +528,7 @@ func greyobject(obj, base, off uintptr, hbits heapBits, gcw *gcWorkProducer) {
                throw("greyobject: obj not pointer-aligned")
        }
 
-       if checkmarkphase {
+       if useCheckmark {
                if !hbits.isMarked() {
                        print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
                        print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")
@@ -591,7 +591,7 @@ func greyobject(obj, base, off uintptr, hbits heapBits, gcw *gcWorkProducer) {
                hbits.setMarked()
        }
 
-       if !checkmarkphase && hbits.typeBits() == typeDead {
+       if !useCheckmark && hbits.typeBits() == typeDead {
                return // noscan object
        }
 
@@ -611,7 +611,7 @@ func gcmarknewobject_m(obj uintptr) {
        if gcphase != _GCmarktermination {
                throw("marking new object while not in mark termination phase")
        }
-       if checkmarkphase { // The world should be stopped so this should not happen.
+       if useCheckmark { // The world should be stopped so this should not happen.
                throw("gcmarknewobject called while doing checkmark")
        }
 
@@ -636,13 +636,14 @@ func gcmarknewobject_m(obj uintptr) {
 // there are no more pointers in the object. This information is held
 // in the second nibble.
 
-// When marking an object if the bool checkmarkphase is true one uses the above
-// encoding, otherwise one uses the bitMarked bit in the lower two bits
-// of the nibble.
-var checkmarkphase = false
+// If useCheckmark is true, marking of an object uses the
+// checkmark bits (encoding above) instead of the standard
+// mark bits.
+var useCheckmark = false
 
 //go:nowritebarrier
 func initCheckmarks() {
+       useCheckmark = true
        for _, s := range work.spans {
                if s.state == _MSpanInUse {
                        heapBitsForSpan(s.base()).initCheckmarkSpan(s.layout())
@@ -651,6 +652,7 @@ func initCheckmarks() {
 }
 
 func clearCheckmarks() {
+       useCheckmark = false
        for _, s := range work.spans {
                if s.state == _MSpanInUse {
                        heapBitsForSpan(s.base()).clearCheckmarkSpan(s.layout())
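
With the rename, initCheckmarks and clearCheckmarks now flip useCheckmark themselves instead of relying on the caller, and greyobject consults the flag to decide whether it is marking or verifying. A toy illustration of that verification idea, unrelated to the real heap bitmap code:

package main

import "fmt"

type obj struct {
	marked    bool // normal mark bit
	checkmark bool // separate bit used only by the debug re-mark
}

var useCheckmark = false

// grey either marks an object (normal pass) or, during the checkmark pass,
// verifies that the normal pass already marked it.
func grey(o *obj) {
	if useCheckmark {
		if !o.marked {
			panic("checkmark found an object the earlier mark missed")
		}
		o.checkmark = true
		return
	}
	o.marked = true
}

func main() {
	o := &obj{}
	grey(o) // normal mark pass
	useCheckmark = true
	grey(o) // check pass: fine, o.marked is already set
	fmt.Println("checkmark pass verified:", o.checkmark)
}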
index 92ddc51e1fce81c08966e8612a27bab2ab674d37..ab18d5ff881da66f4b6abc0c12545636486504d4 100644 (file)
@@ -11,8 +11,8 @@ import "unsafe"
 var sweep sweepdata
 
 // State of background sweep.
-// Protected by gclock.
 type sweepdata struct {
+       lock    mutex
        g       *g
        parked  bool
        started bool
@@ -23,8 +23,6 @@ type sweepdata struct {
        npausesweep uint32
 }
 
-var gclock mutex
-
 //go:nowritebarrier
 func finishsweep_m() {
        // The world is stopped so we should be able to complete the sweeps
@@ -51,16 +49,16 @@ func bgsweep() {
                        sweep.nbgsweep++
                        Gosched()
                }
-               lock(&gclock)
+               lock(&sweep.lock)
                if !gosweepdone() {
                        // This can happen if a GC runs between
                        // gosweepone returning ^0 above
                        // and the lock being acquired.
-                       unlock(&gclock)
+                       unlock(&sweep.lock)
                        continue
                }
                sweep.parked = true
-               goparkunlock(&gclock, "GC sweep wait", traceEvGoBlock)
+               goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock)
        }
 }
 
@@ -145,10 +143,6 @@ func mSpan_EnsureSwept(s *mspan) {
 // caller takes care of it.
 //TODO go:nowritebarrier
 func mSpan_Sweep(s *mspan, preserve bool) bool {
-       if checkmarkphase {
-               throw("MSpan_Sweep: checkmark only runs in STW and after the sweep")
-       }
-
        // It's critical that we enter this function with preemption disabled,
        // GC must not start while we are in the middle of this function.
        _g_ := getg()
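
Finally, the mgcsweep.go changes fold the old global gclock into sweep.lock while keeping the bgsweep park/wake protocol unchanged. The sketch below imitates that protocol with ordinary Go primitives (a mutex and a channel in place of goparkunlock and ready); the sweeper type and its fields are hypothetical stand-ins, but the re-check under the lock corresponds to the "This can happen if a GC runs between gosweepone returning ^0 above and the lock being acquired" comment in the diff.

package main

import (
	"fmt"
	"sync"
)

type sweeper struct {
	lock    sync.Mutex
	parked  bool
	started bool
	wake    chan struct{} // stands in for ready(sweep.g)
	work    int           // stands in for the span queue behind gosweepone
}

// sweepOne consumes one unit of work; it reports false when the queue is empty.
func (s *sweeper) sweepOne() bool {
	s.lock.Lock()
	defer s.lock.Unlock()
	if s.work == 0 {
		return false
	}
	s.work--
	return true
}

func (s *sweeper) bgsweep() {
	for {
		for s.sweepOne() {
		}
		s.lock.Lock()
		if s.work > 0 {
			// A GC queued more work between the last sweepOne and
			// taking the lock; go around again instead of parking.
			s.lock.Unlock()
			continue
		}
		s.parked = true
		s.lock.Unlock()
		<-s.wake // park until the next GC readies us
	}
}

// startGC queues work and either starts or wakes the background sweeper,
// mirroring the background-sweep branch of gcSweep.
func (s *sweeper) startGC(nspans int) {
	s.lock.Lock()
	s.work += nspans
	if !s.started {
		s.started = true
		go s.bgsweep()
	} else if s.parked {
		s.parked = false
		s.wake <- struct{}{}
	}
	s.lock.Unlock()
}

func main() {
	s := &sweeper{wake: make(chan struct{}, 1)}
	s.startGC(100)
	s.startGC(50)
	fmt.Println("background sweep scheduled")
}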