runtime: duplicate scanobject in greentea and non-greentea files
author Michael Anthony Knyszek <mknyszek@google.com>
	Tue, 22 Jul 2025 23:31:51 +0000 (23:31 +0000)
committer Gopher Robot <gobot@golang.org>
	Fri, 25 Jul 2025 18:33:18 +0000 (11:33 -0700)
This change exists to make it possible to differentiate profile samples
spent in Green Tea GC from those spent in non-Green-Tea GC in mixed
contexts.

Change-Id: I8dea340d2d11ba4c410ae939fb5f37020d0b55d1
Reviewed-on: https://go-review.googlesource.com/c/go/+/689477
Reviewed-by: Michael Pratt <mpratt@google.com>
Auto-Submit: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
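
The two destination files are compiled under mutually exclusive build
constraints, so any single build contains exactly one definition of
scanobject and profile frames resolve to the variant-specific source file.
A minimal sketch of the pattern, assuming the goexperiment.greenteagc tag
that selects between the greentea and non-greentea runtime files (package
and function names below are illustrative, not the runtime's):

    // file: scan_greentea.go
    //go:build goexperiment.greenteagc

    package example

    // With the experiment on, a profiler attributes samples in doScan
    // to scan_greentea.go...
    func doScan() {
            // variant-specific scanning work
    }

    // file: scan_nogreentea.go
    //go:build !goexperiment.greenteagc

    package example

    // ...and with it off, to scan_nogreentea.go.
    func doScan() {
            // baseline scanning work
    }

Since only one file is built at a time, the duplicate definitions never
collide, and the two copies are free to diverge independently later.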

src/runtime/mgcmark.go
src/runtime/mgcmark_greenteagc.go
src/runtime/mgcmark_nogreenteagc.go

diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index a136c7aeaceda245e37cbe2eee09f1b3e413fb43..b8a1d8fc146eb5655b2e88146a8ab75052f19337 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -1435,107 +1435,6 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState)
        }
 }
 
-// scanobject scans the object starting at b, adding pointers to gcw.
-// b must point to the beginning of a heap object or an oblet.
-// scanobject consults the GC bitmap for the pointer mask and the
-// spans for the size of the object.
-//
-//go:nowritebarrier
-func scanobject(b uintptr, gcw *gcWork) {
-       // Prefetch object before we scan it.
-       //
-       // This will overlap fetching the beginning of the object with initial
-       // setup before we start scanning the object.
-       sys.Prefetch(b)
-
-       // Find the bits for b and the size of the object at b.
-       //
-       // b is either the beginning of an object, in which case this
-       // is the size of the object to scan, or it points to an
-       // oblet, in which case we compute the size to scan below.
-       s := spanOfUnchecked(b)
-       n := s.elemsize
-       if n == 0 {
-               throw("scanobject n == 0")
-       }
-       if s.spanclass.noscan() {
-               // Correctness-wise this is ok, but it's inefficient
-               // if noscan objects reach here.
-               throw("scanobject of a noscan object")
-       }
-
-       var tp typePointers
-       if n > maxObletBytes {
-               // Large object. Break into oblets for better
-               // parallelism and lower latency.
-               if b == s.base() {
-                       // Enqueue the other oblets to scan later.
-                       // Some oblets may be in b's scalar tail, but
-                       // these will be marked as "no more pointers",
-                       // so we'll drop out immediately when we go to
-                       // scan those.
-                       for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
-                               if !gcw.putObjFast(oblet) {
-                                       gcw.putObj(oblet)
-                               }
-                       }
-               }
-
-               // Compute the size of the oblet. Since this object
-               // must be a large object, s.base() is the beginning
-               // of the object.
-               n = s.base() + s.elemsize - b
-               n = min(n, maxObletBytes)
-               tp = s.typePointersOfUnchecked(s.base())
-               tp = tp.fastForward(b-tp.addr, b+n)
-       } else {
-               tp = s.typePointersOfUnchecked(b)
-       }
-
-       var scanSize uintptr
-       for {
-               var addr uintptr
-               if tp, addr = tp.nextFast(); addr == 0 {
-                       if tp, addr = tp.next(b + n); addr == 0 {
-                               break
-                       }
-               }
-
-               // Keep track of farthest pointer we found, so we can
-               // update heapScanWork. TODO: is there a better metric,
-               // now that we can skip scalar portions pretty efficiently?
-               scanSize = addr - b + goarch.PtrSize
-
-               // Work here is duplicated in scanblock and above.
-               // If you make changes here, make changes there too.
-               obj := *(*uintptr)(unsafe.Pointer(addr))
-
-               // At this point we have extracted the next potential pointer.
-               // Quickly filter out nil and pointers back to the current object.
-               if obj != 0 && obj-b >= n {
-                       // Test if obj points into the Go heap and, if so,
-                       // mark the object.
-                       //
-                       // Note that it's possible for findObject to
-                       // fail if obj points to a just-allocated heap
-                       // object because of a race with growing the
-                       // heap. In this case, we know the object was
-                       // just allocated and hence will be marked by
-                       // allocation itself.
-                       if !tryDeferToSpanScan(obj, gcw) {
-                               if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
-                                       greyobject(obj, b, addr-b, span, gcw, objIndex)
-                               }
-                       }
-               }
-       }
-       gcw.bytesMarked += uint64(n)
-       gcw.heapScanWork += int64(scanSize)
-       if debug.gctrace > 1 {
-               gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++
-       }
-}
-
 // scanConservative scans block [b, b+n) conservatively, treating any
 // pointer-like value in the block as a pointer.
 //
diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go
index a2f28e95d24eadd7dd58c07af5dac5e906b71e2f..1f2db2e1bb1a18af0d7bb9c0a665ec6766d7de42 100644
--- a/src/runtime/mgcmark_greenteagc.go
+++ b/src/runtime/mgcmark_greenteagc.go
@@ -851,3 +851,107 @@ func (w *gcWork) flushScanStats(dst *[gc.NumSizeClasses]sizeClassScanStats) {
        }
        clear(w.stats[:])
 }
+
+// scanobject scans the object starting at b, adding pointers to gcw.
+// b must point to the beginning of a heap object or an oblet.
+// scanobject consults the GC bitmap for the pointer mask and the
+// spans for the size of the object.
+//
+// Used only for !gcUsesSpanInlineMarkBits spans, but supports all
+// object sizes and is safe to be called on all heap objects.
+//
+//go:nowritebarrier
+func scanobject(b uintptr, gcw *gcWork) {
+       // Prefetch object before we scan it.
+       //
+       // This will overlap fetching the beginning of the object with initial
+       // setup before we start scanning the object.
+       sys.Prefetch(b)
+
+       // Find the bits for b and the size of the object at b.
+       //
+       // b is either the beginning of an object, in which case this
+       // is the size of the object to scan, or it points to an
+       // oblet, in which case we compute the size to scan below.
+       s := spanOfUnchecked(b)
+       n := s.elemsize
+       if n == 0 {
+               throw("scanobject n == 0")
+       }
+       if s.spanclass.noscan() {
+               // Correctness-wise this is ok, but it's inefficient
+               // if noscan objects reach here.
+               throw("scanobject of a noscan object")
+       }
+
+       var tp typePointers
+       if n > maxObletBytes {
+               // Large object. Break into oblets for better
+               // parallelism and lower latency.
+               if b == s.base() {
+                       // Enqueue the other oblets to scan later.
+                       // Some oblets may be in b's scalar tail, but
+                       // these will be marked as "no more pointers",
+                       // so we'll drop out immediately when we go to
+                       // scan those.
+                       for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
+                               if !gcw.putObjFast(oblet) {
+                                       gcw.putObj(oblet)
+                               }
+                       }
+               }
+
+               // Compute the size of the oblet. Since this object
+               // must be a large object, s.base() is the beginning
+               // of the object.
+               n = s.base() + s.elemsize - b
+               n = min(n, maxObletBytes)
+               tp = s.typePointersOfUnchecked(s.base())
+               tp = tp.fastForward(b-tp.addr, b+n)
+       } else {
+               tp = s.typePointersOfUnchecked(b)
+       }
+
+       var scanSize uintptr
+       for {
+               var addr uintptr
+               if tp, addr = tp.nextFast(); addr == 0 {
+                       if tp, addr = tp.next(b + n); addr == 0 {
+                               break
+                       }
+               }
+
+               // Keep track of farthest pointer we found, so we can
+               // update heapScanWork. TODO: is there a better metric,
+               // now that we can skip scalar portions pretty efficiently?
+               scanSize = addr - b + goarch.PtrSize
+
+               // Work here is duplicated in scanblock and above.
+               // If you make changes here, make changes there too.
+               obj := *(*uintptr)(unsafe.Pointer(addr))
+
+               // At this point we have extracted the next potential pointer.
+               // Quickly filter out nil and pointers back to the current object.
+               if obj != 0 && obj-b >= n {
+                       // Test if obj points into the Go heap and, if so,
+                       // mark the object.
+                       //
+                       // Note that it's possible for findObject to
+                       // fail if obj points to a just-allocated heap
+                       // object because of a race with growing the
+                       // heap. In this case, we know the object was
+                       // just allocated and hence will be marked by
+                       // allocation itself.
+                       if !tryDeferToSpanScan(obj, gcw) {
+                               if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
+                                       greyobject(obj, b, addr-b, span, gcw, objIndex)
+                               }
+                       }
+               }
+       }
+       gcw.bytesMarked += uint64(n)
+       gcw.heapScanWork += int64(scanSize)
+       if debug.gctrace > 1 {
+               gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++
+       }
+}
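
A note on the filter obj != 0 && obj-b >= n in the scan loop above: because
uintptr subtraction wraps, a single unsigned comparison rejects both nil and
any pointer back into the current chunk [b, b+n), including addresses below
b. A small standalone sketch of the trick (pointsOutside is an illustrative
helper, not a runtime function):

    package main

    import "fmt"

    // pointsOutside reports whether obj is non-nil and lies outside
    // [b, b+n). For obj < b the subtraction wraps around to a huge
    // value, so obj-b >= n holds; for obj inside the chunk, obj-b < n.
    func pointsOutside(obj, b, n uintptr) bool {
            return obj != 0 && obj-b >= n
    }

    func main() {
            b, n := uintptr(0x1000), uintptr(0x100)
            fmt.Println(pointsOutside(0, b, n))      // false: nil
            fmt.Println(pointsOutside(0x1080, b, n)) // false: pointer into the chunk itself
            fmt.Println(pointsOutside(0x2000, b, n)) // true: candidate heap pointer
            fmt.Println(pointsOutside(0x0800, b, n)) // true: below b, wraps past n
    }
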
diff --git a/src/runtime/mgcmark_nogreenteagc.go b/src/runtime/mgcmark_nogreenteagc.go
index 6e4f0c4f7255b2b13d5ca243a87ad6cbbb922379..3ae0802e6ce2f151ee8a133b2b8bc43d72b92c7d 100644
--- a/src/runtime/mgcmark_nogreenteagc.go
+++ b/src/runtime/mgcmark_nogreenteagc.go
@@ -6,7 +6,12 @@
 
 package runtime
 
-import "internal/runtime/gc"
+import (
+       "internal/goarch"
+       "internal/runtime/gc"
+       "internal/runtime/sys"
+       "unsafe"
+)
 
 func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
        bytep, mask := s.gcmarkBits.bitp(objIndex)
@@ -110,3 +115,104 @@ func (w *gcWork) flushScanStats(dst *[gc.NumSizeClasses]sizeClassScanStats) {
        }
        clear(w.stats[:])
 }
+
+// scanobject scans the object starting at b, adding pointers to gcw.
+// b must point to the beginning of a heap object or an oblet.
+// scanobject consults the GC bitmap for the pointer mask and the
+// spans for the size of the object.
+//
+//go:nowritebarrier
+func scanobject(b uintptr, gcw *gcWork) {
+       // Prefetch object before we scan it.
+       //
+       // This will overlap fetching the beginning of the object with initial
+       // setup before we start scanning the object.
+       sys.Prefetch(b)
+
+       // Find the bits for b and the size of the object at b.
+       //
+       // b is either the beginning of an object, in which case this
+       // is the size of the object to scan, or it points to an
+       // oblet, in which case we compute the size to scan below.
+       s := spanOfUnchecked(b)
+       n := s.elemsize
+       if n == 0 {
+               throw("scanobject n == 0")
+       }
+       if s.spanclass.noscan() {
+               // Correctness-wise this is ok, but it's inefficient
+               // if noscan objects reach here.
+               throw("scanobject of a noscan object")
+       }
+
+       var tp typePointers
+       if n > maxObletBytes {
+               // Large object. Break into oblets for better
+               // parallelism and lower latency.
+               if b == s.base() {
+                       // Enqueue the other oblets to scan later.
+                       // Some oblets may be in b's scalar tail, but
+                       // these will be marked as "no more pointers",
+                       // so we'll drop out immediately when we go to
+                       // scan those.
+                       for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
+                               if !gcw.putObjFast(oblet) {
+                                       gcw.putObj(oblet)
+                               }
+                       }
+               }
+
+               // Compute the size of the oblet. Since this object
+               // must be a large object, s.base() is the beginning
+               // of the object.
+               n = s.base() + s.elemsize - b
+               n = min(n, maxObletBytes)
+               tp = s.typePointersOfUnchecked(s.base())
+               tp = tp.fastForward(b-tp.addr, b+n)
+       } else {
+               tp = s.typePointersOfUnchecked(b)
+       }
+
+       var scanSize uintptr
+       for {
+               var addr uintptr
+               if tp, addr = tp.nextFast(); addr == 0 {
+                       if tp, addr = tp.next(b + n); addr == 0 {
+                               break
+                       }
+               }
+
+               // Keep track of farthest pointer we found, so we can
+               // update heapScanWork. TODO: is there a better metric,
+               // now that we can skip scalar portions pretty efficiently?
+               scanSize = addr - b + goarch.PtrSize
+
+               // Work here is duplicated in scanblock and above.
+               // If you make changes here, make changes there too.
+               obj := *(*uintptr)(unsafe.Pointer(addr))
+
+               // At this point we have extracted the next potential pointer.
+               // Quickly filter out nil and pointers back to the current object.
+               if obj != 0 && obj-b >= n {
+                       // Test if obj points into the Go heap and, if so,
+                       // mark the object.
+                       //
+                       // Note that it's possible for findObject to
+                       // fail if obj points to a just-allocated heap
+                       // object because of a race with growing the
+                       // heap. In this case, we know the object was
+                       // just allocated and hence will be marked by
+                       // allocation itself.
+                       if !tryDeferToSpanScan(obj, gcw) {
+                               if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
+                                       greyobject(obj, b, addr-b, span, gcw, objIndex)
+                               }
+                       }
+               }
+       }
+       gcw.bytesMarked += uint64(n)
+       gcw.heapScanWork += int64(scanSize)
+       if debug.gctrace > 1 {
+               gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++
+       }
+}
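
Note the asymmetry in the accounting at the end of scanobject: bytesMarked
is credited with the full chunk size n, while heapScanWork is credited only
through the farthest pointer slot found. A worked example, assuming a
64-bit platform (goarch.PtrSize == 8) and a 64-byte object whose last
pointer slot sits at offset 24 from b:

    scanSize = 24 + 8 = 32    // credited to gcw.heapScanWork
    n        = 64             // credited to gcw.bytesMarked

The trailing 32 scalar bytes are marked but not counted as scan work, which
is the imprecision the TODO in the loop alludes to.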