}
}
-// scanobject scans the object starting at b, adding pointers to gcw.
-// b must point to the beginning of a heap object or an oblet.
-// scanobject consults the GC bitmap for the pointer mask and the
-// spans for the size of the object.
-//
-//go:nowritebarrier
-func scanobject(b uintptr, gcw *gcWork) {
- // Prefetch object before we scan it.
- //
- // This will overlap fetching the beginning of the object with initial
- // setup before we start scanning the object.
- sys.Prefetch(b)
-
- // Find the bits for b and the size of the object at b.
- //
- // b is either the beginning of an object, in which case this
- // is the size of the object to scan, or it points to an
- // oblet, in which case we compute the size to scan below.
- s := spanOfUnchecked(b)
- n := s.elemsize
- if n == 0 {
- throw("scanobject n == 0")
- }
- if s.spanclass.noscan() {
- // Correctness-wise this is ok, but it's inefficient
- // if noscan objects reach here.
- throw("scanobject of a noscan object")
- }
-
- var tp typePointers
- if n > maxObletBytes {
- // Large object. Break into oblets for better
- // parallelism and lower latency.
- if b == s.base() {
- // Enqueue the other oblets to scan later.
- // Some oblets may be in b's scalar tail, but
- // these will be marked as "no more pointers",
- // so we'll drop out immediately when we go to
- // scan those.
- for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
- if !gcw.putObjFast(oblet) {
- gcw.putObj(oblet)
- }
- }
- }
-
- // Compute the size of the oblet. Since this object
- // must be a large object, s.base() is the beginning
- // of the object.
- n = s.base() + s.elemsize - b
- n = min(n, maxObletBytes)
- tp = s.typePointersOfUnchecked(s.base())
- tp = tp.fastForward(b-tp.addr, b+n)
- } else {
- tp = s.typePointersOfUnchecked(b)
- }
-
- var scanSize uintptr
- for {
- var addr uintptr
- if tp, addr = tp.nextFast(); addr == 0 {
- if tp, addr = tp.next(b + n); addr == 0 {
- break
- }
- }
-
- // Keep track of farthest pointer we found, so we can
- // update heapScanWork. TODO: is there a better metric,
- // now that we can skip scalar portions pretty efficiently?
- scanSize = addr - b + goarch.PtrSize
-
- // Work here is duplicated in scanblock and above.
- // If you make changes here, make changes there too.
- obj := *(*uintptr)(unsafe.Pointer(addr))
-
- // At this point we have extracted the next potential pointer.
- // Quickly filter out nil and pointers back to the current object.
- if obj != 0 && obj-b >= n {
- // Test if obj points into the Go heap and, if so,
- // mark the object.
- //
- // Note that it's possible for findObject to
- // fail if obj points to a just-allocated heap
- // object because of a race with growing the
- // heap. In this case, we know the object was
- // just allocated and hence will be marked by
- // allocation itself.
- if !tryDeferToSpanScan(obj, gcw) {
- if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
- greyobject(obj, b, addr-b, span, gcw, objIndex)
- }
- }
- }
- }
- gcw.bytesMarked += uint64(n)
- gcw.heapScanWork += int64(scanSize)
- if debug.gctrace > 1 {
- gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++
- }
-}
-
// scanConservative scans block [b, b+n) conservatively, treating any
// pointer-like value in the block as a pointer.
//
}
clear(w.stats[:])
}
+
+// scanobject scans the object starting at b, adding pointers to gcw.
+// b must point to the beginning of a heap object or an oblet.
+// scanobject consults the GC bitmap for the pointer mask and the
+// spans for the size of the object.
+//
+// Used only for !gcUsesSpanInlineMarkBits spans, but supports all
+// object sizes and is safe to be called on all heap objects.
+//
+//go:nowritebarrier
+func scanobject(b uintptr, gcw *gcWork) {
+ // Prefetch object before we scan it.
+ //
+ // This will overlap fetching the beginning of the object with initial
+ // setup before we start scanning the object.
+ sys.Prefetch(b)
+
+ // Find the bits for b and the size of the object at b.
+ //
+ // b is either the beginning of an object, in which case this
+ // is the size of the object to scan, or it points to an
+ // oblet, in which case we compute the size to scan below.
+ s := spanOfUnchecked(b)
+ n := s.elemsize
+ if n == 0 {
+ throw("scanobject n == 0")
+ }
+ if s.spanclass.noscan() {
+ // Correctness-wise this is ok, but it's inefficient
+ // if noscan objects reach here.
+ throw("scanobject of a noscan object")
+ }
+
+ var tp typePointers
+ if n > maxObletBytes {
+ // Large object. Break into oblets for better
+ // parallelism and lower latency.
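+ // (An oblet is a maxObletBytes-sized chunk of a large
+ // object, scanned as an independent unit of GC work.)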
+ if b == s.base() {
+ // Enqueue the other oblets to scan later.
+ // Some oblets may be in b's scalar tail, but
+ // these will be marked as "no more pointers",
+ // so we'll drop out immediately when we go to
+ // scan those.
+ for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
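+ // putObjFast succeeds only when there is room in the
+ // current workbuf; putObj is the slow path and handles
+ // workbuf overflow.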
+ if !gcw.putObjFast(oblet) {
+ gcw.putObj(oblet)
+ }
+ }
+ }
+
+ // Compute the size of the oblet. Since this object
+ // must be a large object, s.base() is the beginning
+ // of the object.
+ n = s.base() + s.elemsize - b
+ n = min(n, maxObletBytes)
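+ // Start the type-pointer iterator at the object's base and
+ // fast-forward it to this oblet's window [b, b+n).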
+ tp = s.typePointersOfUnchecked(s.base())
+ tp = tp.fastForward(b-tp.addr, b+n)
+ } else {
+ tp = s.typePointersOfUnchecked(b)
+ }
+
+ var scanSize uintptr
+ for {
+ var addr uintptr
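+ // Ask the type-pointer iterator for the address of the next
+ // pointer slot. nextFast is the inlineable fast path; fall
+ // back to next, bounded by the end of the scan range at b+n.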
+ if tp, addr = tp.nextFast(); addr == 0 {
+ if tp, addr = tp.next(b + n); addr == 0 {
+ break
+ }
+ }
+
+ // Keep track of farthest pointer we found, so we can
+ // update heapScanWork. TODO: is there a better metric,
+ // now that we can skip scalar portions pretty efficiently?
+ scanSize = addr - b + goarch.PtrSize
+
+ // Work here is duplicated in scanblock and above.
+ // If you make changes here, make changes there too.
+ obj := *(*uintptr)(unsafe.Pointer(addr))
+
+ // At this point we have extracted the next potential pointer.
+ // Quickly filter out nil and pointers back to the current object.
+ if obj != 0 && obj-b >= n {
+ // Test if obj points into the Go heap and, if so,
+ // mark the object.
+ //
+ // Note that it's possible for findObject to
+ // fail if obj points to a just-allocated heap
+ // object because of a race with growing the
+ // heap. In this case, we know the object was
+ // just allocated and hence will be marked by
+ // allocation itself.
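+ //
+ // Objects in spans with inline mark bits are handed to the
+ // span scanner: tryDeferToSpanScan marks obj and queues its
+ // span, returning false when it cannot take the object
+ // (e.g. the span has no inline mark bits).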
+ if !tryDeferToSpanScan(obj, gcw) {
+ if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
+ greyobject(obj, b, addr-b, span, gcw, objIndex)
+ }
+ }
+ }
+ }
+ gcw.bytesMarked += uint64(n)
+ gcw.heapScanWork += int64(scanSize)
+ if debug.gctrace > 1 {
+ gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++
+ }
+}
package runtime
-import "internal/runtime/gc"
+import (
+ "internal/goarch"
+ "internal/runtime/gc"
+ "internal/runtime/sys"
+ "unsafe"
+)
func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}