runtime: panic if cleanup function closes over cleanup pointer
author     Ian Lance Taylor <iant@golang.org>
           Fri, 22 Aug 2025 20:47:42 +0000 (13:47 -0700)
committer  Gopher Robot <gobot@golang.org>
           Wed, 26 Nov 2025 06:10:16 +0000 (22:10 -0800)
This would catch problems like https://go.dev/cl/696295.
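For illustration, a minimal sketch (hypothetical names, not code from that CL) of the mistake the new check catches: if the cleanup closure captures ptr itself, ptr stays reachable from the cleanup queue forever, so the object can never be collected and the cleanup can never run.

    package main

    import "runtime"

    type resource struct{ fd int }

    func main() {
        r := &resource{fd: 3}
        // Bug: the closure captures r, so r never becomes unreachable
        // and the cleanup never runs. With this CL, AddCleanup panics.
        runtime.AddCleanup(r, func(int) { _ = r.fd }, 0)
    }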

Benchmark effect with this CL plus CL 697535:

goos: linux
goarch: amd64
pkg: runtime
cpu: 12th Gen Intel(R) Core(TM) i7-1260P
                     │ /tmp/foo.1  │             /tmp/foo.2             │
                     │   sec/op    │   sec/op     vs base               │
AddCleanupAndStop-16   81.93n ± 1%   82.87n ± 1%  +1.14% (p=0.041 n=10)

For #75066

Change-Id: Ia41d0d6b88baebf6055cb7e5d42bc8210b31630f
Reviewed-on: https://go-review.googlesource.com/c/go/+/714000
Reviewed-by: Michael Pratt <mpratt@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Auto-Submit: Ian Lance Taylor <iant@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>

src/runtime/mcleanup.go
src/runtime/mcleanup_test.go
src/runtime/testdata/testprog/checkfinalizers.go

diff --git a/src/runtime/mcleanup.go b/src/runtime/mcleanup.go
index d190653213fae6c8467d85552bc7c4f46ff4e2b1..e6f2fb00b3e76ced4b4d67235d3004d1651fbe57 100644
@@ -71,7 +71,14 @@ import (
 // mentions it. To ensure a cleanup does not get called prematurely,
 // pass the object to the [KeepAlive] function after the last point
 // where the object must remain reachable.
+//
+//go:nocheckptr
 func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
+       // This is marked nocheckptr because checkptr doesn't understand the
+       // pointer manipulation done when looking at closure pointers.
+       // Similar code in mbitmap.go works because the functions are
+       // go:nosplit, which implies go:nocheckptr (CL 202158).
+
        // Explicitly force ptr and cleanup to escape to the heap.
        ptr = abi.Escape(ptr)
        cleanup = abi.Escape(cleanup)
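For context on the "closure pointers" the new comment mentions: a Go func value is a pointer to a funcval header, with any captured variables laid out in memory after the code pointer. A minimal sketch of that layout, mirroring the definition in src/runtime/runtime2.go:

    // funcval is the runtime's view of a func value: the code pointer
    // fn, followed in memory by any variables the closure captured.
    type funcval struct {
        fn uintptr
        // variable-size, fn-specific captured data follows fn.
    }

Reinterpreting &cleanup as **funcval, as the new check below does, is the sort of unsafe conversion that checkptr is conservative about, hence the go:nocheckptr annotation.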
@@ -145,6 +152,23 @@ func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
                }
        }
 
+       // Check that the cleanup function doesn't close over the pointer.
+       cleanupFV := unsafe.Pointer(*(**funcval)(unsafe.Pointer(&cleanup)))
+       cBase, cSpan, _ := findObject(uintptr(cleanupFV), 0, 0)
+       if cBase != 0 {
+               tp := cSpan.typePointersOfUnchecked(cBase)
+               for {
+                       var addr uintptr
+                       if tp, addr = tp.next(cBase + cSpan.elemsize); addr == 0 {
+                               break
+                       }
+                       ptr := *(*uintptr)(unsafe.Pointer(addr))
+                       if ptr >= base && ptr < base+span.elemsize {
+                               panic("runtime.AddCleanup: cleanup function closes over ptr, cleanup will never run")
+                       }
+               }
+       }
+
        // Create another G if necessary.
        if gcCleanups.needG() {
                gcCleanups.createGs()
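As a usage note alongside the new panic (a hedged sketch with hypothetical names, following the pattern the AddCleanup documentation already recommends): pass whatever state the cleanup needs through arg instead of capturing ptr, so the object can actually become unreachable.

    package main

    import (
        "fmt"
        "runtime"
    )

    type file struct{ fd int }

    func main() {
        f := &file{fd: 3}
        // Correct: the cleanup receives the descriptor via arg and does
        // not reference f, so f can be collected and the cleanup can run.
        runtime.AddCleanup(f, func(fd int) { fmt.Println("closing", fd) }, f.fd)
        runtime.KeepAlive(f)
    }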
diff --git a/src/runtime/mcleanup_test.go b/src/runtime/mcleanup_test.go
index 28fc7f407fbe5ebb1012ad7fcd3491a4f01567ad..5afe85e103670b151b8f8de79da097a5aaab72f5 100644
@@ -337,7 +337,7 @@ func TestCleanupLost(t *testing.T) {
        }
 }
 
-func TestCleanupUnreachable(t *testing.T) {
+func TestCleanupUnreachablePointer(t *testing.T) {
        defer func() {
                if recover() == nil {
                        t.Error("AddCleanup failed to detect self-pointer")
@@ -354,6 +354,24 @@ func TestCleanupUnreachable(t *testing.T) {
        }, &v.f)
 }
 
+func TestCleanupUnreachableClosure(t *testing.T) {
+       defer func() {
+               if recover() == nil {
+                       t.Error("AddCleanup failed to detect closure pointer")
+               }
+       }()
+
+       type T struct {
+               p *byte // use *byte to avoid tiny allocator
+               f int
+       }
+       v := &T{}
+       runtime.AddCleanup(v, func(int) {
+               t.Log(v.f)
+               t.Error("cleanup ran unexpectedly")
+       }, 0)
+}
+
 // BenchmarkAddCleanupAndStop benchmarks adding and removing a cleanup
 // from the same allocation.
 //
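An aside on the p *byte field in the new test (a hedged note, assuming the tiny allocator's usual restriction to small pointer-free objects): giving T a pointer field keeps v out of a shared tiny-allocator block, so it occupies a heap object of its own for the new scan to compare against.

    // Hypothetical illustration of the distinction:
    type tinyEligible struct{ f int } // small, pointer-free: may share a tiny block
    type notTiny struct {             // has a pointer: allocated as its own object
        p *byte
        f int
    }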
diff --git a/src/runtime/testdata/testprog/checkfinalizers.go b/src/runtime/testdata/testprog/checkfinalizers.go
index ea352a4e3e92f1b889a371566561293672a0447c..9f082a25ff1a9d7a07fdb4fd1a241daaa52110ad 100644
@@ -28,17 +28,40 @@ func DetectFinalizerAndCleanupLeaks() {
 
        // Leak a cleanup.
        cLeak := new(T)
+
+       // Use an extra closure to avoid the simple
+       // checking done by AddCleanup.
+       var closeOverCLeak func(int)
+       closeOverCLeak = func(x int) {
+               // Use recursion to avoid inlining.
+               if x <= 0 {
+                       **cLeak = x
+               } else {
+                       closeOverCLeak(x - 1)
+               }
+       }
+
        runtime.AddCleanup(cLeak, func(x int) {
-               **cLeak = x
+               closeOverCLeak(x)
        }, int(0))
 
        // Have a regular cleanup to make sure it doesn't trip the detector.
        cNoLeak := new(T)
        runtime.AddCleanup(cNoLeak, func(_ int) {}, int(0))
 
+       // Like closeOverCLeak.
+       var closeOverCNoLeak func(int)
+       closeOverCNoLeak = func(x int) {
+               if x <= 0 {
+                       **cNoLeak = x
+               } else {
+                       closeOverCNoLeak(x - 1)
+               }
+       }
+
        // Add a cleanup that only temporarily leaks cNoLeak.
        runtime.AddCleanup(cNoLeak, func(x int) {
-               **cNoLeak = x
+               closeOverCNoLeak(x)
        }, int(0)).Stop()
 
        if !asan.Enabled && !race.Enabled {
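Why the extra closure keeps this test working: AddCleanup scans only the cleanup closure's own allocation, so a captured pointer to another funcval that in turn captures the object sits one level too deep for the check to see. A hedged standalone sketch of the same shape (hypothetical code, mirroring the testprog change above):

    package main

    import "runtime"

    func main() {
        leak := new(int)
        // Recursion prevents inlining; if helper were inlined into the
        // cleanup closure below, that closure would capture leak
        // directly and AddCleanup would panic after all.
        var helper func(int)
        helper = func(x int) {
            if x <= 0 {
                *leak = x
            } else {
                helper(x - 1)
            }
        }
        // The cleanup closure captures only the helper variable, not
        // leak itself, so the one-level scan finds no pointer into
        // leak's object and does not panic, even though leak can never
        // become unreachable.
        runtime.AddCleanup(leak, func(x int) { helper(x) }, 0)
        runtime.KeepAlive(leak)
    }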