func checkFinalizersAndCleanups() {
assertWorldStopped()
- failed := false
+ type report struct {
+ ptr uintptr
+ sp *special
+ }
+ var reports [25]report
+ var nreports int
+ var more bool
+
forEachSpecial(func(p uintptr, s *mspan, sp *special) bool {
// We only care about finalizers and cleanups.
if sp.kind != _KindSpecialFinalizer && sp.kind != _KindSpecialCleanup {
return true
}
if atomic.Load8(bytep)&mask != 0 {
- if !failed {
- println("runtime: found possibly unreclaimable objects:")
+ if nreports >= len(reports) {
+ more = true
+ return false
}
- failed = true
- kind := "cleanup"
- if sp.kind == _KindSpecialFinalizer {
+ reports[nreports] = report{p, sp}
+ nreports++
+ }
+ return true
+ })
+
+ if nreports > 0 {
+ lastPtr := uintptr(0)
+ for _, r := range reports[:nreports] {
+ var ctx *specialCheckFinalizer
+ var kind string
+ if r.sp.kind == _KindSpecialFinalizer {
kind = "finalizer"
+ ctx = getCleanupContext(r.ptr, 0) // Finalizers use the reserved cleanup ID 0.
+ } else {
+ kind = "cleanup"
+ ctx = getCleanupContext(r.ptr, ((*specialCleanup)(unsafe.Pointer(r.sp))).id)
+ }
+
+ // N.B. reports is sorted 'enough' that cleanups/finalizers on the same pointer will
+ // appear consecutively because the specials list is sorted.
+ if lastPtr != r.ptr {
+ if lastPtr != 0 {
+ println()
+ }
+ print("runtime: value of type ", toRType(ctx.ptrType).string(), " @ ", hex(r.ptr), " is reachable from cleanup or finalizer\n")
+ println("value reachable from function or argument at one of:")
}
- print("\t0x", hex(p), " leaked due to a ", kind)
- if sp.kind == _KindSpecialFinalizer {
- spf := (*specialfinalizer)(unsafe.Pointer(sp))
- print(" (", (rtype{spf.fint}).string(), ")\n")
+
+ funcInfo := findfunc(ctx.funcPC)
+ if funcInfo.valid() {
+ file, line := funcline(funcInfo, ctx.funcPC)
+ print(funcname(funcInfo), " (", kind, ")\n")
+ print("\t", file, ":", line, "\n")
+ } else {
+ print("<bad pc ", hex(ctx.funcPC), ">\n")
+ }
+
+ print("created at: ")
+ createInfo := findfunc(ctx.createPC)
+ if createInfo.valid() {
+ file, line := funcline(createInfo, ctx.createPC)
+ print(funcname(createInfo), "\n")
+ print("\t", file, ":", line, "\n")
} else {
- println()
+ print("<bad pc ", hex(ctx.createPC), ">\n")
}
+
+ lastPtr = r.ptr
}
- return true
- })
- if failed {
- throw("runtime: detected possible cleanup and/or finalizer leak")
+ println()
+ if more {
+ println("runtime: too many errors")
+ }
+ throw("runtime: detected possible cleanup and/or finalizer leaks")
}
}
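For reference, a minimal user program that should trip this report (a sketch, assuming the checkfinalizers GODEBUG gate this series builds on; run with GODEBUG=checkfinalizers=1). The finalizer closes over the object it is attached to, so once leak returns the object stays reachable only through its own finalizer:

    package main

    import "runtime"

    type T struct {
        buf [1 << 20]byte
    }

    func leak() {
        t := new(T)
        // Bug: the finalizer captures t itself, so t can never become
        // unreachable and its finalizer can never run.
        runtime.SetFinalizer(t, func(_ *T) { _ = t })
    }

    func main() {
        leak()
        runtime.GC()
    }

With the rewrite above, the report names the value's type and address, the finalizer function (funcPC), and the SetFinalizer call site (createPC), then throws.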
pad [(cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
}
- spanalloc fixalloc // allocator for span*
- cachealloc fixalloc // allocator for mcache*
- specialfinalizeralloc fixalloc // allocator for specialfinalizer*
- specialCleanupAlloc fixalloc // allocator for specialcleanup*
- specialprofilealloc fixalloc // allocator for specialprofile*
- specialReachableAlloc fixalloc // allocator for specialReachable
- specialPinCounterAlloc fixalloc // allocator for specialPinCounter
- specialWeakHandleAlloc fixalloc // allocator for specialWeakHandle
- speciallock mutex // lock for special record allocators.
- arenaHintAlloc fixalloc // allocator for arenaHints
+ spanalloc fixalloc // allocator for span*
+ cachealloc fixalloc // allocator for mcache*
+ specialfinalizeralloc fixalloc // allocator for specialfinalizer*
+ specialCleanupAlloc fixalloc // allocator for specialCleanup*
+ specialCheckFinalizerAlloc fixalloc // allocator for specialCheckFinalizer*
+ specialprofilealloc fixalloc // allocator for specialprofile*
+ specialReachableAlloc fixalloc // allocator for specialReachable
+ specialPinCounterAlloc fixalloc // allocator for specialPinCounter
+ specialWeakHandleAlloc fixalloc // allocator for specialWeakHandle
+ speciallock mutex // lock for special record allocators.
+ arenaHintAlloc fixalloc // allocator for arenaHints
// User arena state.
//
// cleanupID is a counter which is incremented each time a cleanup special is added
// to a span. It's used to create globally unique identifiers for individual cleanups.
- // cleanupID is protected by mheap_.lock. It should only be incremented while holding
- // the lock.
+ // cleanupID is protected by mheap_.speciallock. It must only be incremented while holding
+ // the lock. ID 0 is reserved. Users should increment first, then read the value.
cleanupID uint64
_ cpu.CacheLinePad
h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
h.specialCleanupAlloc.init(unsafe.Sizeof(specialCleanup{}), nil, nil, &memstats.other_sys)
+ h.specialCheckFinalizerAlloc.init(unsafe.Sizeof(specialCheckFinalizer{}), nil, nil, &memstats.other_sys)
h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys)
h.specialPinCounterAlloc.init(unsafe.Sizeof(specialPinCounter{}), nil, nil, &memstats.other_sys)
_KindSpecialPinCounter = 5
// _KindSpecialCleanup is for tracking cleanups.
_KindSpecialCleanup = 6
+ // _KindSpecialCheckFinalizer adds additional context to a finalizer or cleanup.
+ // Used only if debug.checkfinalizers != 0.
+ _KindSpecialCheckFinalizer = 7
)
type special struct {
func addCleanup(p unsafe.Pointer, f *funcval) uint64 {
lock(&mheap_.speciallock)
s := (*specialCleanup)(mheap_.specialCleanupAlloc.alloc())
- mheap_.cleanupID++
+ mheap_.cleanupID++ // Increment first. ID 0 is reserved.
id := mheap_.cleanupID
unlock(&mheap_.speciallock)
s.special.kind = _KindSpecialCleanup
return id
}
+// specialCheckFinalizer is always paired with a specialCleanup or
+// specialfinalizer and records the context in which it was created.
+// Used only if debug.checkfinalizers != 0.
+type specialCheckFinalizer struct {
+ _ sys.NotInHeap
+ special special
+ cleanupID uint64 // Needed to disambiguate cleanups.
+ createPC uintptr
+ funcPC uintptr
+ ptrType *_type
+}
+
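To make the cleanupID field concrete: one object can carry several cleanups plus one finalizer, and each of them gets its own paired check special. A user-level sketch (the nonzero IDs are whatever mheap_.cleanupID hands out; only 0 is fixed, reserved for the finalizer):

    obj := new(T)
    runtime.AddCleanup(obj, func(int) {}, 1) // paired check special, cleanupID = some n > 0
    runtime.AddCleanup(obj, func(int) {}, 2) // paired check special, a different n > 0
    runtime.SetFinalizer(obj, func(*T) {})   // paired check special, cleanupID = 0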
+// setFinalizerContext adds a specialCheckFinalizer to ptr. ptr must already have a
+// finalizer special attached.
+func setFinalizerContext(ptr unsafe.Pointer, ptrType *_type, createPC, funcPC uintptr) {
+ setCleanupContext(ptr, ptrType, createPC, funcPC, 0)
+}
+
+// setCleanupContext adds a specialCheckFinalizer to ptr. ptr must already have a
+// finalizer or cleanup special attached. Pass 0 for the cleanupID to indicate
+// a finalizer.
+func setCleanupContext(ptr unsafe.Pointer, ptrType *_type, createPC, funcPC uintptr, cleanupID uint64) {
+ lock(&mheap_.speciallock)
+ s := (*specialCheckFinalizer)(mheap_.specialCheckFinalizerAlloc.alloc())
+ unlock(&mheap_.speciallock)
+ s.special.kind = _KindSpecialCheckFinalizer
+ s.cleanupID = cleanupID
+ s.createPC = createPC
+ s.funcPC = funcPC
+ s.ptrType = ptrType
+
+ mp := acquirem()
+ addspecial(ptr, &s.special, true)
+ releasem(mp)
+ KeepAlive(ptr)
+}
+
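How these setters get invoked is outside this hunk. A hedged sketch of the intended call site in runtime.AddCleanup; the gate and the funcval/PC plumbing shown here are assumptions, not part of this diff:

    // Hypothetical wiring inside AddCleanup[T, S], after the cleanup
    // special is attached and id is allocated (increment-first, so id != 0).
    if debug.checkfinalizers != 0 {
        fv := *(**funcval)(unsafe.Pointer(&cleanup))
        setCleanupContext(unsafe.Pointer(ptr), abi.TypeFor[T](), sys.GetCallerPC(), fv.fn, id)
    }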
+func getCleanupContext(ptr uintptr, cleanupID uint64) *specialCheckFinalizer {
+ assertWorldStopped()
+
+ span := spanOfHeap(ptr)
+ if span == nil {
+ return nil
+ }
+ var found *specialCheckFinalizer
+ offset := ptr - span.base()
+ iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCheckFinalizer)
+ if exists {
+ for {
+ s := *iter
+ if s == nil {
+ // Reached the end of the linked list. Stop searching at this point.
+ break
+ }
+ if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind &&
+ (*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
+ // Found the check special with a matching cleanup ID. This is
+ // a read-only lookup, so leave the special on the list; splicing
+ // it out here would corrupt the specials list and leak the record.
+ found = (*specialCheckFinalizer)(unsafe.Pointer(s))
+ break
+ }
+ if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) {
+ // The special is outside the region specified for that kind of
+ // special. The specials are sorted by kind.
+ break
+ }
+ // Try the next special.
+ iter = &s.next
+ }
+ }
+ return found
+}
+
+// clearFinalizerContext removes the specialCheckFinalizer for the given pointer, if any.
+func clearFinalizerContext(ptr uintptr) {
+ clearCleanupContext(ptr, 0)
+}
+
+// clearCleanupContext removes the specialCheckFinalizer for the given pointer and cleanup ID, if any.
+func clearCleanupContext(ptr uintptr, cleanupID uint64) {
+ // Remove the specialCheckFinalizer record for ptr with the given cleanup ID, if one exists.
+ span := spanOfHeap(ptr)
+ if span == nil {
+ return
+ }
+ // Ensure that the span is swept.
+ // Sweeping accesses the specials list w/o locks, so we have
+ // to synchronize with it. And it's just much safer.
+ mp := acquirem()
+ span.ensureSwept()
+
+ offset := ptr - span.base()
+
+ var found *special
+ lock(&span.speciallock)
+
+ iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCheckFinalizer)
+ if exists {
+ for {
+ s := *iter
+ if s == nil {
+ // Reached the end of the linked list. Stop searching at this point.
+ break
+ }
+ if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind &&
+ (*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
+ // Found the check special with a matching cleanup ID. Splice it out.
+ *iter = s.next
+ found = s
+ break
+ }
+ if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) {
+ // The special is outside the region specified for that kind of
+ // special. The specials are sorted by kind.
+ break
+ }
+ // Try the next special.
+ iter = &s.next
+ }
+ }
+ if span.specials == nil {
+ spanHasNoSpecials(span)
+ }
+ unlock(&span.speciallock)
+ releasem(mp)
+
+ if found == nil {
+ return
+ }
+ lock(&mheap_.speciallock)
+ mheap_.specialCheckFinalizerAlloc.free(unsafe.Pointer(found))
+ unlock(&mheap_.speciallock)
+}
+
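The matching teardown must run wherever the underlying finalizer or cleanup is detached; otherwise the check special and its fixalloc block are left behind. A hedged sketch (the surrounding removal path is an assumption; only the clear helpers come from this diff):

    // Hypothetical: SetFinalizer(p, nil) removes an existing finalizer,
    // so drop its paired context too while the mode is on.
    if debug.checkfinalizers != 0 {
        clearFinalizerContext(uintptr(p))
    }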
// The described object has a weak pointer.
//
// Weak pointers in the GC have the following invariants:
lock(&mheap_.speciallock)
mheap_.specialCleanupAlloc.free(unsafe.Pointer(sc))
unlock(&mheap_.speciallock)
+ case _KindSpecialCheckFinalizer:
+ sc := (*specialCheckFinalizer)(unsafe.Pointer(s))
+ lock(&mheap_.speciallock)
+ mheap_.specialCheckFinalizerAlloc.free(unsafe.Pointer(sc))
+ unlock(&mheap_.speciallock)
default:
throw("bad special kind")
panic("not reached")