Cypherpunks repositories - gostls13.git/commitdiff
runtime: implement Stop for AddCleanup
author Carlos Amedee <carlos@golang.org>
Thu, 14 Nov 2024 14:56:49 +0000 (09:56 -0500)
committer Gopher Robot <gobot@golang.org>
Sat, 16 Nov 2024 03:54:51 +0000 (03:54 +0000)
This change implements Stop on the Cleanup handle returned by
AddCleanup, allowing the caller to cancel a pending cleanup call. A
cleanup can no longer be stopped once it has been queued for
execution.
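
As an illustration (an editor's sketch, not part of the commit), this
is how the completed API reads from user code on a Go release that
includes this change. The resource type and fd value are invented;
only runtime.AddCleanup, Cleanup.Stop, and runtime.KeepAlive are real
API:

    package main

    import "runtime"

    // resource and its fd field are invented for the example.
    type resource struct{ fd int }

    func main() {
        r := &resource{fd: 3}
        // Register a cleanup that runs after r becomes unreachable.
        c := runtime.AddCleanup(r, func(fd int) { println("closing", fd) }, r.fd)
        // The resource was released by other means, so cancel the cleanup.
        c.Stop()
        // Per the doc comment on Stop: keep r reachable across the call,
        // otherwise the cleanup may already have been queued and Stop
        // becomes a no-op.
        runtime.KeepAlive(r)
    }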

For #67535

Change-Id: I494b77d344e54d772c41489d172286773c3814e5
Reviewed-on: https://go-review.googlesource.com/c/go/+/627975
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Auto-Submit: Carlos Amedee <carlos@golang.org>

src/runtime/mcleanup.go
src/runtime/mcleanup_test.go
src/runtime/mheap.go

src/runtime/mcleanup.go
index 70df5755ce03b921e74e87c3570afe50a45bdcc1..db1a6ac67e2125b95d420860a4abbf5701d24203 100644 (file)
@@ -64,40 +64,91 @@ func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
        if debug.sbrk != 0 {
                // debug.sbrk never frees memory, so no cleanup will ever run
                // (and we don't have the data structures to record them).
-               // return a noop cleanup.
+               // Return a noop cleanup.
                return Cleanup{}
        }
 
        fn := func() {
                cleanup(arg)
        }
-       // closure must escape
+       // Closure must escape.
        fv := *(**funcval)(unsafe.Pointer(&fn))
        fv = abi.Escape(fv)
 
-       // find the containing object
+       // Find the containing object.
        base, _, _ := findObject(usptr, 0, 0)
        if base == 0 {
                if isGoPointerWithoutSpan(unsafe.Pointer(ptr)) {
+                       // Cleanup is a noop.
                        return Cleanup{}
                }
                throw("runtime.AddCleanup: ptr not in allocated block")
        }
 
-       // ensure we have a finalizer processing goroutine running.
+       // Ensure we have a finalizer processing goroutine running.
        createfing()
 
-       addCleanup(unsafe.Pointer(ptr), fv)
-       return Cleanup{}
+       id := addCleanup(unsafe.Pointer(ptr), fv)
+       return Cleanup{
+               id:  id,
+               ptr: usptr,
+       }
 }
 
 // Cleanup is a handle to a cleanup call for a specific object.
-type Cleanup struct{}
+type Cleanup struct {
+       // id is the unique identifier for the cleanup within the arena.
+       id uint64
+       // ptr contains the pointer to the object.
+       ptr uintptr
+}
 
 // Stop cancels the cleanup call. Stop will have no effect if the cleanup call
 // has already been queued for execution (because ptr became unreachable).
 // To guarantee that Stop removes the cleanup function, the caller must ensure
 // that the pointer that was passed to AddCleanup is reachable across the call to Stop.
-//
-// TODO(amedee) needs implementation.
-func (c Cleanup) Stop() {}
+func (c Cleanup) Stop() {
+       if c.id == 0 {
+               // id is set to zero when the cleanup is a noop.
+               return
+       }
+
+       // The following block removes the Special record of type cleanup for the object c.ptr.
+       span := spanOfHeap(uintptr(unsafe.Pointer(c.ptr)))
+       if span == nil {
+               return
+       }
+       // Ensure that the span is swept.
+       // Sweeping accesses the specials list w/o locks, so we have
+       // to synchronize with it. And it's just much safer.
+       mp := acquirem()
+       span.ensureSwept()
+
+       offset := uintptr(unsafe.Pointer(c.ptr)) - span.base()
+
+       var found *special
+       lock(&span.speciallock)
+
+       iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCleanup)
+       if exists {
+               // Walk the cleanup specials sharing this offset; they are
+               // adjacent in the sorted specials list.
+               for s := *iter; s != nil && offset == uintptr(s.offset) && s.kind == _KindSpecialCleanup; s = *iter {
+                       if (*specialCleanup)(unsafe.Pointer(s)).id == c.id {
+                               // Splice the matching special out of the list.
+                               *iter = s.next
+                               found = s
+                               break
+                       }
+                       iter = &s.next
+               }
+       }
+       if span.specials == nil {
+               spanHasNoSpecials(span)
+       }
+       unlock(&span.speciallock)
+       releasem(mp)
+
+       if found == nil {
+               return
+       }
+       lock(&mheap_.speciallock)
+       mheap_.specialCleanupAlloc.free(unsafe.Pointer(found))
+       unlock(&mheap_.speciallock)
+}
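
The removal above uses the pointer-to-pointer splice idiom: iter holds
the address of the link that points at the current special, so
unlinking needs no separate prev pointer. A standalone miniature of
the same technique (illustrative types and names, not the runtime's):

    package main

    // node is a stand-in for the runtime's special record.
    type node struct {
        next *node
        id   uint64
    }

    // removeByID splices the node with the given id out of the list rooted
    // at *head and returns it, or nil if nothing matches. iter always holds
    // the address of the link pointing at the current node.
    func removeByID(head **node, id uint64) *node {
        for iter := head; *iter != nil; iter = &(*iter).next {
            if s := *iter; s.id == id {
                *iter = s.next // rewrite the incoming link; no prev needed
                return s
            }
        }
        return nil
    }

    func main() {
        list := &node{id: 1, next: &node{id: 2}}
        removed := removeByID(&list, 2)
        println(removed != nil, list.next == nil) // true true
    }
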
src/runtime/mcleanup_test.go
index 66d58ef8a28d1cc144302bf54325a76921fc0403..8c2d1f06477fdd28263d061aab9177cad3372349 100644 (file)
@@ -157,3 +157,115 @@ func TestCleanupInteriorPointer(t *testing.T) {
        <-ch
        <-ch
 }
+
+func TestCleanupStop(t *testing.T) {
+       done := make(chan bool, 1)
+       go func() {
+               // Allocate struct with pointer to avoid hitting tinyalloc.
+               // Otherwise we can't be sure when the allocation will
+               // be freed.
+               type T struct {
+                       v int
+                       p unsafe.Pointer
+               }
+               v := &new(T).v
+               *v = 97531
+               cleanup := func(x int) {
+                       t.Error("cleanup called, want no cleanup called")
+               }
+               c := runtime.AddCleanup(v, cleanup, 97531)
+               c.Stop()
+               v = nil
+               done <- true
+       }()
+       <-done
+       runtime.GC()
+}
+
+func TestCleanupStopMultiple(t *testing.T) {
+       done := make(chan bool, 1)
+       go func() {
+               // Allocate struct with pointer to avoid hitting tinyalloc.
+               // Otherwise we can't be sure when the allocation will
+               // be freed.
+               type T struct {
+                       v int
+                       p unsafe.Pointer
+               }
+               v := &new(T).v
+               *v = 97531
+               cleanup := func(x int) {
+                       t.Error("cleanup called, want no cleanup called")
+               }
+               c := runtime.AddCleanup(v, cleanup, 97531)
+               c.Stop()
+               c.Stop()
+               c.Stop()
+               v = nil
+               done <- true
+       }()
+       <-done
+       runtime.GC()
+}
+
+func TestCleanupStopInterleavedMultiple(t *testing.T) {
+       ch := make(chan bool, 3)
+       done := make(chan bool, 1)
+       go func() {
+               // Allocate struct with pointer to avoid hitting tinyalloc.
+               // Otherwise we can't be sure when the allocation will
+               // be freed.
+               type T struct {
+                       v int
+                       p unsafe.Pointer
+               }
+               v := &new(T).v
+               *v = 97531
+               cleanup := func(x int) {
+                       if x != 1 {
+                               t.Error("cleanup called, want no cleanup called")
+                       }
+                       ch <- true
+               }
+               runtime.AddCleanup(v, cleanup, 1)
+               runtime.AddCleanup(v, cleanup, 2).Stop()
+               runtime.AddCleanup(v, cleanup, 1)
+               runtime.AddCleanup(v, cleanup, 2).Stop()
+               runtime.AddCleanup(v, cleanup, 1)
+               v = nil
+               done <- true
+       }()
+       <-done
+       runtime.GC()
+       <-ch
+       <-ch
+       <-ch
+}
+
+func TestCleanupStopAfterCleanupRuns(t *testing.T) {
+       ch := make(chan bool, 1)
+       done := make(chan bool, 1)
+       var stop func()
+       go func() {
+               // Allocate struct with pointer to avoid hitting tinyalloc.
+               // Otherwise we can't be sure when the allocation will
+               // be freed.
+               type T struct {
+                       v int
+                       p unsafe.Pointer
+               }
+               v := &new(T).v
+               *v = 97531
+               cleanup := func(x int) {
+                       ch <- true
+               }
+               cl := runtime.AddCleanup(v, cleanup, 97531)
+               v = nil
+               stop = cl.Stop
+               done <- true
+       }()
+       <-done
+       runtime.GC()
+       <-ch
+       stop()
+}
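
All four new tests share the TestCleanupStop prefix, so they can be
exercised together with the standard invocation:

    go test -run=TestCleanupStop runtime
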
src/runtime/mheap.go
index 72e7819b663359b2782053afe99d92ef79cfdffe..031c7ee9c316d9aa90cae4ff6972ded5e29d1af6 100644 (file)
@@ -231,6 +231,12 @@ type mheap struct {
                readyList mSpanList
        }
 
+       // cleanupID is a counter which is incremented each time a cleanup special is added
+       // to a span. It's used to create globally unique identifiers for individual cleanups.
+       // cleanupID is protected by mheap_.speciallock and should only be incremented while
+       // holding that lock.
+       cleanupID uint64
+
        unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
 }
 
@@ -2020,16 +2026,23 @@ type specialCleanup struct {
        _       sys.NotInHeap
        special special
        fn      *funcval
+       // Globally unique ID for the cleanup, obtained from mheap_.cleanupID.
+       id uint64
 }
 
 // addCleanup attaches a cleanup function to the object. Multiple
 // cleanups are allowed on an object, and even the same pointer.
-func addCleanup(p unsafe.Pointer, f *funcval) {
+// A cleanup id is returned which can be used to uniquely identify
+// the cleanup.
+func addCleanup(p unsafe.Pointer, f *funcval) uint64 {
        lock(&mheap_.speciallock)
        s := (*specialCleanup)(mheap_.specialCleanupAlloc.alloc())
+       mheap_.cleanupID++
+       id := mheap_.cleanupID
        unlock(&mheap_.speciallock)
        s.special.kind = _KindSpecialCleanup
        s.fn = f
+       s.id = id
 
        mp := acquirem()
        addspecial(p, &s.special, true)
@@ -2044,6 +2057,7 @@ func addCleanup(p unsafe.Pointer, f *funcval) {
                // special isn't part of the GC'd heap.
                scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
        }
+       return id
 }
 
 // The described object has a weak pointer.
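
Read more generally, the counter enables a simple cancellation-handle
pattern: hand out a fresh id under a lock at registration time,
reserve 0 for the no-op handle, and cancel by deleting exactly that
id. A minimal sketch with invented names (registry, add, stop) — not
runtime code:

    package main

    import "sync"

    // registry is an invented stand-in for the heap's cleanup bookkeeping.
    type registry struct {
        mu     sync.Mutex
        nextID uint64 // like mheap_.cleanupID, incremented under mu
        funcs  map[uint64]func()
    }

    // add registers f and returns an id that uniquely identifies it.
    func (r *registry) add(f func()) uint64 {
        r.mu.Lock()
        defer r.mu.Unlock()
        r.nextID++ // first id is 1; 0 stays reserved for a no-op handle
        if r.funcs == nil {
            r.funcs = make(map[uint64]func())
        }
        r.funcs[r.nextID] = f
        return r.nextID
    }

    // stop cancels the registration; a missing id is simply a no-op,
    // mirroring Cleanup.Stop after the cleanup has been queued.
    func (r *registry) stop(id uint64) {
        r.mu.Lock()
        defer r.mu.Unlock()
        delete(r.funcs, id)
    }

    func main() {
        var r registry
        id := r.add(func() { println("cleanup") })
        r.stop(id) // the registered func will never run
    }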