func TestDetectFinalizerAndCleanupLeaks(t *testing.T) {
got := runTestProg(t, "testprog", "DetectFinalizerAndCleanupLeaks", "GODEBUG=checkfinalizers=1")
- sp := strings.SplitN(got, "runtime: detected", 2)
+ // Split on the throw message; sp[0] is all diagnostic output the runtime
+ // printed before it threw.
+ sp := strings.SplitN(got, "detected possible issues with cleanups and/or finalizers", 2)
if len(sp) != 2 {
t.Fatalf("expected the runtime to throw, got:\n%s", got)
}
- if strings.Count(sp[0], "is reachable from cleanup or finalizer") != 2 {
+ // The child program deliberately leaks two objects (one via cleanup, one
+ // via finalizer); each produces one "is reachable from ..." line.
+ if strings.Count(sp[0], "is reachable from") != 2 {
t.Fatalf("expected exactly two leaked cleanups and/or finalizers, got:\n%s", got)
}
- if strings.Count(sp[0], "created at: main.DetectFinalizerAndCleanupLeaks") != 2 {
- t.Fatalf("expected two symbolized locations, got:\n%s", got)
+ // N.B. Disable in race mode and in asan mode. Both disable the tiny
+ // allocator, so no tiny-block report (and no third symbolized location)
+ // is produced under those build modes.
+ wantSymbolizedLocations := 2
+ if !race.Enabled && !asan.Enabled {
+ if strings.Count(sp[0], "is in a tiny block") != 1 {
+ t.Fatalf("expected exactly one report for allocation in a tiny block, got:\n%s", got)
+ }
+ wantSymbolizedLocations++
+ }
+ if strings.Count(sp[0], "main.DetectFinalizerAndCleanupLeaks()") != wantSymbolizedLocations {
+ t.Fatalf("expected %d symbolized locations, got:\n%s", wantSymbolizedLocations, got)
}
}
traceRelease(trace)
}
}
+
+ // N.B. elemsize == 0 indicates a tiny allocation, since no new slot was
+ // allocated to fulfill this call to mallocgc.
+ if debug.checkfinalizers != 0 && elemsize == 0 {
+ setTinyBlockContext(unsafe.Pointer(alignDown(uintptr(x), maxTinySize)))
+ }
}
// deductAssistCredit reduces the current G's assist credit
func checkFinalizersAndCleanups() {
assertWorldStopped()
+ // Bit flags describing why a given object is being reported.
+ const (
+ reportCycle = 1 << iota
+ reportTiny
+ )
+
+ // A report records one flagged object: the set of issues found for it,
+ // its address, and its finalizer/cleanup special.
type report struct {
- ptr uintptr
- sp *special
+ issues int
+ ptr uintptr
+ sp *special
}
- var reports [25]report
+ var reports [50]report
var nreports int
var more bool
+ // Base address of the most recently visited tiny block special; used
+ // below to decide whether a finalizer/cleanup target lives in that block.
+ var lastTinyBlock uintptr
forEachSpecial(func(p uintptr, s *mspan, sp *special) bool {
+ // N.B. The tiny block specials are sorted first in the specials list,
+ // so a block's tiny marker is seen before any finalizer or cleanup
+ // special attached to an address within that block.
+ if sp.kind == _KindSpecialTinyBlock {
+ lastTinyBlock = s.base() + sp.offset
+ return true
+ }
+
// We only care about finalizers and cleanups.
if sp.kind != _KindSpecialFinalizer && sp.kind != _KindSpecialCleanup {
return true
if bytep == nil {
return true
}
+ var issues int
if atomic.Load8(bytep)&mask != 0 {
+ issues |= reportCycle
+ }
+ // NOTE(review): this range test relies on specials being visited in
+ // address order within a span — confirm forEachSpecial guarantees that.
+ if p >= lastTinyBlock && p < lastTinyBlock+maxTinySize {
+ issues |= reportTiny
+ }
+ if issues != 0 {
if nreports >= len(reports) {
+ // Out of report slots; record that output was truncated.
more = true
return false
}
- reports[nreports] = report{p, sp}
+ reports[nreports] = report{issues, p, sp}
nreports++
}
return true
if nreports > 0 {
lastPtr := uintptr(0)
+ println("WARNING: LIKELY CLEANUP/FINALIZER ISSUES")
+ println()
for _, r := range reports[:nreports] {
var ctx *specialCheckFinalizer
var kind string
+ // Blank line between consecutive object reports.
if lastPtr != 0 {
println()
}
- print("runtime: value of type ", toRType(ctx.ptrType).string(), " @ ", hex(r.ptr), " is reachable from cleanup or finalizer\n")
- println("value reachable from function or argument at one of:")
+ // Describe the object, then each issue found for it.
+ print("Value of type ", toRType(ctx.ptrType).string(), " at ", hex(r.ptr), "\n")
+ if r.issues&reportCycle != 0 {
+ if r.sp.kind == _KindSpecialFinalizer {
+ println(" is reachable from finalizer")
+ } else {
+ println(" is reachable from cleanup or cleanup argument")
+ }
+ }
+ if r.issues&reportTiny != 0 {
+ println(" is in a tiny block with other (possibly long-lived) values")
+ }
+ // When both issues apply, the tiny block itself may be what keeps
+ // the cycle alive; call that out explicitly.
+ if r.issues&reportTiny != 0 && r.issues&reportCycle != 0 {
+ if r.sp.kind == _KindSpecialFinalizer {
+ println(" may be in the same tiny block as finalizer")
+ } else {
+ println(" may be in the same tiny block as cleanup or cleanup argument")
+ }
+ }
}
+ println()
+ println("Has", kind, "at", hex(uintptr(unsafe.Pointer(r.sp))))
+ // Symbolize the cleanup/finalizer function itself, then the site
+ // where it was created.
funcInfo := findfunc(ctx.funcPC)
if funcInfo.valid() {
- file, line := funcline(funcInfo, ctx.createPC)
- print(funcname(funcInfo), " (", kind, ")\n")
- print("\t", file, ":", line, "\n")
+ file, line := funcline(funcInfo, ctx.funcPC)
+ print(" ", funcname(funcInfo), "()\n")
+ print(" ", file, ":", line, " +", hex(ctx.funcPC-funcInfo.entry()), "\n")
} else {
- print("<bad pc ", hex(ctx.funcPC), ">\n")
+ print(" <bad pc ", hex(ctx.funcPC), ">\n")
}
- print("created at: ")
+ println("created at: ")
createInfo := findfunc(ctx.createPC)
if createInfo.valid() {
file, line := funcline(createInfo, ctx.createPC)
- print(funcname(createInfo), "\n")
- print("\t", file, ":", line, "\n")
+ print(" ", funcname(createInfo), "()\n")
+ print(" ", file, ":", line, " +", hex(ctx.createPC-createInfo.entry()), "\n")
} else {
- print("<bad pc ", hex(ctx.createPC), ">\n")
+ print(" <bad pc ", hex(ctx.createPC), ">\n")
}
lastPtr = r.ptr
}
println()
if more {
- println("runtime: too many errors")
+ println("... too many potential issues ...")
}
- throw("runtime: detected possible cleanup and/or finalizer leaks")
+ throw("detected possible issues with cleanups and/or finalizers")
}
}
specialfinalizeralloc fixalloc // allocator for specialfinalizer*
specialCleanupAlloc fixalloc // allocator for specialCleanup*
specialCheckFinalizerAlloc fixalloc // allocator for specialCheckFinalizer*
+ specialTinyBlockAlloc fixalloc // allocator for specialTinyBlock*
specialprofilealloc fixalloc // allocator for specialprofile*
specialReachableAlloc fixalloc // allocator for specialReachable
specialPinCounterAlloc fixalloc // allocator for specialPinCounter
h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
h.specialCleanupAlloc.init(unsafe.Sizeof(specialCleanup{}), nil, nil, &memstats.other_sys)
h.specialCheckFinalizerAlloc.init(unsafe.Sizeof(specialCheckFinalizer{}), nil, nil, &memstats.other_sys)
+ h.specialTinyBlockAlloc.init(unsafe.Sizeof(specialTinyBlock{}), nil, nil, &memstats.other_sys)
h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys)
h.specialPinCounterAlloc.init(unsafe.Sizeof(specialPinCounter{}), nil, nil, &memstats.other_sys)
}
const (
+ // NOTE(review): inserting _KindSpecialTinyBlock renumbers every kind
+ // below; confirm all switches over special kinds handle the new value
+ // (freeSpecial gains a _KindSpecialTinyBlock case in this change).
+ // _KindSpecialTinyBlock indicates that a given allocation is a tiny block.
+ // Ordered before _KindSpecialFinalizer and _KindSpecialCleanup so that it
+ // always appears first in the specials list.
+ // Used only if debug.checkfinalizers != 0.
+ _KindSpecialTinyBlock = 1
// _KindSpecialFinalizer is for tracking finalizers.
- _KindSpecialFinalizer = 1
+ _KindSpecialFinalizer = 2
// _KindSpecialWeakHandle is used for creating weak pointers.
- _KindSpecialWeakHandle = 2
+ _KindSpecialWeakHandle = 3
// _KindSpecialProfile is for memory profiling.
- _KindSpecialProfile = 3
+ _KindSpecialProfile = 4
// _KindSpecialReachable is a special used for tracking
// reachability during testing.
- _KindSpecialReachable = 4
+ _KindSpecialReachable = 5
// _KindSpecialPinCounter is a special used for objects that are pinned
// multiple times
- _KindSpecialPinCounter = 5
+ _KindSpecialPinCounter = 6
// _KindSpecialCleanup is for tracking cleanups.
- _KindSpecialCleanup = 6
+ _KindSpecialCleanup = 7
// _KindSpecialCheckFinalizer adds additional context to a finalizer or cleanup.
// Used only if debug.checkfinalizers != 0.
- _KindSpecialCheckFinalizer = 7
+ _KindSpecialCheckFinalizer = 8
)
type special struct {
unlock(&mheap_.speciallock)
}
+// specialTinyBlock marks an allocation as a tiny block, i.e. one whose
+// memory may be shared among several distinct values by the tiny allocator.
+// It carries no payload beyond the special header itself.
+// Used only if debug.checkfinalizers != 0.
+type specialTinyBlock struct {
+ _ sys.NotInHeap
+ special special
+}
+
+// setTinyBlockContext marks an allocation as a tiny block to diagnostics like
+// checkfinalizer.
+//
+// The caller is responsible for only marking blocks that actually contain
+// more than one distinct value (e.g. when mallocgc satisfies a request from
+// an existing tiny slot), since the mark exists purely for debugging.
+func setTinyBlockContext(ptr unsafe.Pointer) {
+ // Allocate the special under the heap's special lock.
+ lock(&mheap_.speciallock)
+ s := (*specialTinyBlock)(mheap_.specialTinyBlockAlloc.alloc())
+ unlock(&mheap_.speciallock)
+ s.special.kind = _KindSpecialTinyBlock
+
+ mp := acquirem()
+ addspecial(ptr, &s.special, false)
+ releasem(mp)
+ // Keep the block reachable until the special has been attached.
+ KeepAlive(ptr)
+}
+
+// inTinyBlock returns whether ptr is in a tiny alloc block, at one point grouped
+// with other distinct values.
+//
+// The world must be stopped; presumably so the span's specials list can be
+// read without taking the span's speciallock — confirm.
+func inTinyBlock(ptr uintptr) bool {
+ assertWorldStopped()
+
+ // Round down to the candidate tiny block's base address.
+ ptr = alignDown(ptr, maxTinySize)
+ span := spanOfHeap(ptr)
+ if span == nil {
+ // Not a heap pointer; it cannot be in a tiny block.
+ return false
+ }
+ offset := ptr - span.base()
+ // A tiny block special at exactly this offset means the block was marked
+ // by setTinyBlockContext as holding multiple distinct values.
+ _, exists := span.specialFindSplicePoint(offset, _KindSpecialTinyBlock)
+ return exists
+}
+
// The described object has a weak pointer.
//
// Weak pointers in the GC have the following invariants:
lock(&mheap_.speciallock)
mheap_.specialCheckFinalizerAlloc.free(unsafe.Pointer(sc))
unlock(&mheap_.speciallock)
+ case _KindSpecialTinyBlock:
+ st := (*specialTinyBlock)(unsafe.Pointer(s))
+ lock(&mheap_.speciallock)
+ mheap_.specialTinyBlockAlloc.free(unsafe.Pointer(st))
+ unlock(&mheap_.speciallock)
default:
throw("bad special kind")
panic("not reached")
traceCheckStackOwnership int32
profstackdepth int32
dataindependenttiming int32
- checkfinalizers int32
// debug.malloc is used as a combined debug check
// in the malloc function and should be set
// if any of the below debug options is != 0.
- malloc bool
- inittrace int32
- sbrk int32
+ malloc bool
+ inittrace int32
+ sbrk int32
+ checkfinalizers int32
// traceallocfree controls whether execution traces contain
// detailed trace data about memory allocation. This value
// affects debug.malloc only if it is != 0 and the execution
// apply environment settings
parsegodebug(godebug, nil)
- debug.malloc = (debug.inittrace | debug.sbrk) != 0
+ debug.malloc = (debug.inittrace | debug.sbrk | debug.checkfinalizers) != 0
debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)
// Disable async preemption in checkmark mode. The following situation is
register("DetectFinalizerAndCleanupLeaks", DetectFinalizerAndCleanupLeaks)
}
+// tiny is a 1-byte type used to force allocations through the tiny allocator.
+type tiny uint8
+
+// tinySink keeps previously allocated tiny values reachable so that several
+// live values end up packed into the same tiny block.
+var tinySink *tiny
+
// Intended to be run only with `GODEBUG=checkfinalizers=1`.
func DetectFinalizerAndCleanupLeaks() {
type T *int
**cNoLeak = x
}, int(0)).Stop()
+ // Ensure we create an allocation into a tiny block that shares space among several values.
+ var ctLeak *tiny
+ // 18 one-byte allocations — presumably enough to guarantee at least one
+ // tiny block holds multiple live values; confirm against maxTinySize.
+ for i := 0; i < 18; i++ {
+ tinySink = ctLeak
+ ctLeak = new(tiny)
+ *ctLeak = tiny(i)
+ }
+ // Attach a cleanup to a value inside the shared tiny block so the runtime
+ // reports it as "in a tiny block".
+ runtime.AddCleanup(ctLeak, func(_ struct{}) {}, struct{}{})
+
// Leak a finalizer.
fLeak := new(T)
runtime.SetFinalizer(fLeak, func(_ *T) {
// runtime.GC here should crash.
runtime.GC()
println("OK")
-
- // Keep everything alive.
- runtime.KeepAlive(cLeak)
- runtime.KeepAlive(cNoLeak)
- runtime.KeepAlive(fLeak)
- runtime.KeepAlive(fNoLeak)
}