if debug.sbrk != 0 {
// debug.sbrk never frees memory, so no cleanup will ever run
// (and we don't have the data structures to record them).
- // return a noop cleanup.
+ // Return a noop cleanup.
return Cleanup{}
}
fn := func() {
cleanup(arg)
}
- // closure must escape
+ // Closure must escape.
fv := *(**funcval)(unsafe.Pointer(&fn))
fv = abi.Escape(fv)
- // find the containing object
+ // Find the containing object.
base, _, _ := findObject(usptr, 0, 0)
if base == 0 {
if isGoPointerWithoutSpan(unsafe.Pointer(ptr)) {
+ // ptr points outside any heap span, so there is no place to
+ // record a special for it. Cleanup is a noop.
return Cleanup{}
}
throw("runtime.AddCleanup: ptr not in allocated block")
}
- // ensure we have a finalizer processing goroutine running.
+ // Ensure we have a finalizer processing goroutine running.
createfing()
- addCleanup(unsafe.Pointer(ptr), fv)
- return Cleanup{}
+ // Record the id and pointer in the handle so Stop can later locate
+ // and remove the cleanup special for this object.
+ id := addCleanup(unsafe.Pointer(ptr), fv)
+ return Cleanup{
+ id: id,
+ ptr: usptr,
+ }
}
// Cleanup is a handle to a cleanup call for a specific object.
-type Cleanup struct{}
+type Cleanup struct {
+ // id is a globally unique identifier for the cleanup, drawn from
+ // mheap_.cleanupID by addCleanup. A zero id marks a noop handle
+ // (no cleanup was recorded); Stop returns immediately for it.
+ id uint64
+ // ptr contains the pointer to the object the cleanup is attached to.
+ // Stop uses it to find the containing span and its specials list.
+ ptr uintptr
+}
// Stop cancels the cleanup call. Stop will have no effect if the cleanup call
// has already been queued for execution (because ptr became unreachable).
// To guarantee that Stop removes the cleanup function, the caller must ensure
// that the pointer that was passed to AddCleanup is reachable across the call to Stop.
-//
-// TODO(amedee) needs implementation.
-func (c Cleanup) Stop() {}
+func (c Cleanup) Stop() {
+ if c.id == 0 {
+ // id is set to zero when the cleanup is a noop.
+ return
+ }
+
+ // The following block removes the Special record of type cleanup for the object c.ptr.
+ span := spanOfHeap(uintptr(unsafe.Pointer(c.ptr)))
+ if span == nil {
+ // No heap span contains c.ptr; nothing to remove.
+ return
+ }
+ // Ensure that the span is swept.
+ // Sweeping accesses the specials list w/o locks, so we have
+ // to synchronize with it. And it's just much safer.
+ mp := acquirem()
+ span.ensureSwept()
+
+ offset := uintptr(unsafe.Pointer(c.ptr)) - span.base()
+
+ var found *special
+ lock(&span.speciallock)
+
+ iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCleanup)
+ if exists {
+ // Specials are sorted by offset, then by kind, so all cleanup
+ // specials for c.ptr are contiguous starting at *iter. Re-read s
+ // from *iter on every iteration so the walk actually advances,
+ // and stop as soon as the offset or kind no longer matches; other
+ // kinds of specials may share the same offset and must not be
+ // reinterpreted as specialCleanup.
+ for s := *iter; s != nil && offset == uintptr(s.offset) && s.kind == _KindSpecialCleanup; s = *iter {
+ if (*specialCleanup)(unsafe.Pointer(s)).id == c.id {
+ // Splice the matching special out of the list.
+ *iter = s.next
+ found = s
+ break
+ }
+ iter = &s.next
+ }
+ }
+ if span.specials == nil {
+ spanHasNoSpecials(span)
+ }
+ unlock(&span.speciallock)
+ releasem(mp)
+
+ if found == nil {
+ // The cleanup already ran or was already stopped.
+ return
+ }
+ // Return the special record to its allocator; it is not part of the
+ // GC'd heap.
+ lock(&mheap_.speciallock)
+ mheap_.specialCleanupAlloc.free(unsafe.Pointer(found))
+ unlock(&mheap_.speciallock)
+}
<-ch
<-ch
}
+
+// TestCleanupStop verifies that a cleanup which was stopped before its
+// object became unreachable never runs: the cleanup func fails the test
+// if invoked.
+func TestCleanupStop(t *testing.T) {
+ done := make(chan bool, 1)
+ go func() {
+ // Allocate struct with pointer to avoid hitting tinyalloc.
+ // Otherwise we can't be sure when the allocation will
+ // be freed.
+ type T struct {
+ v int
+ p unsafe.Pointer
+ }
+ v := &new(T).v
+ *v = 97531
+ cleanup := func(x int) {
+ t.Error("cleanup called, want no cleanup called")
+ }
+ c := runtime.AddCleanup(v, cleanup, 97531)
+ c.Stop()
+ // Drop the only reference so the object can become unreachable.
+ v = nil
+ done <- true
+ }()
+ <-done
+ runtime.GC()
+}
+
+// TestCleanupStopMultiple verifies that calling Stop repeatedly on the
+// same handle is safe and still prevents the cleanup from running.
+func TestCleanupStopMultiple(t *testing.T) {
+ done := make(chan bool, 1)
+ go func() {
+ // Allocate struct with pointer to avoid hitting tinyalloc.
+ // Otherwise we can't be sure when the allocation will
+ // be freed.
+ type T struct {
+ v int
+ p unsafe.Pointer
+ }
+ v := &new(T).v
+ *v = 97531
+ cleanup := func(x int) {
+ t.Error("cleanup called, want no cleanup called")
+ }
+ c := runtime.AddCleanup(v, cleanup, 97531)
+ // Second and third Stop must be no-ops, not panics or double frees.
+ c.Stop()
+ c.Stop()
+ c.Stop()
+ v = nil
+ done <- true
+ }()
+ <-done
+ runtime.GC()
+}
+
+// TestCleanupStopInterleavedMultiple attaches five cleanups to one object
+// and stops the two added with arg 2. Only the three arg-1 cleanups may
+// run; the three channel receives at the end wait for exactly those.
+func TestCleanupStopInterleavedMultiple(t *testing.T) {
+ ch := make(chan bool, 3)
+ done := make(chan bool, 1)
+ go func() {
+ // Allocate struct with pointer to avoid hitting tinyalloc.
+ // Otherwise we can't be sure when the allocation will
+ // be freed.
+ type T struct {
+ v int
+ p unsafe.Pointer
+ }
+ v := &new(T).v
+ *v = 97531
+ cleanup := func(x int) {
+ if x != 1 {
+ t.Errorf("cleanup called with arg %d, want only cleanups with arg 1 to run", x)
+ }
+ ch <- true
+ }
+ runtime.AddCleanup(v, cleanup, 1)
+ runtime.AddCleanup(v, cleanup, 2).Stop()
+ runtime.AddCleanup(v, cleanup, 1)
+ runtime.AddCleanup(v, cleanup, 2).Stop()
+ runtime.AddCleanup(v, cleanup, 1)
+ v = nil
+ done <- true
+ }()
+ <-done
+ runtime.GC()
+ <-ch
+ <-ch
+ <-ch
+}
+
+// TestCleanupStopAfterCleanupRuns checks that calling Stop after the
+// cleanup has already been queued and executed is harmless.
+func TestCleanupStopAfterCleanupRuns(t *testing.T) {
+ ch := make(chan bool, 1)
+ done := make(chan bool, 1)
+ var stop func()
+ go func() {
+ // Allocate struct with pointer to avoid hitting tinyalloc.
+ // Otherwise we can't be sure when the allocation will
+ // be freed.
+ type T struct {
+ v int
+ p unsafe.Pointer
+ }
+ v := &new(T).v
+ *v = 97531
+ cleanup := func(x int) {
+ ch <- true
+ }
+ cl := runtime.AddCleanup(v, cleanup, 97531)
+ v = nil
+ // Keep only the Stop method so it can be called after the
+ // cleanup has run.
+ stop = cl.Stop
+ done <- true
+ }()
+ <-done
+ runtime.GC()
+ // Wait for the cleanup to run; Stop must then be a no-op.
+ <-ch
+ stop()
+}
readyList mSpanList
}
+ // cleanupID is a counter which is incremented each time a cleanup special is added
+ // to a span. It's used to create globally unique identifiers for individual cleanup.
+ // cleanupID is protected by mheap_.speciallock: addCleanup increments it while
+ // holding that lock.
+ cleanupID uint64
+
unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
}
_ sys.NotInHeap
special special
fn *funcval
+ // Globally unique ID for the cleanup, obtained from mheap_.cleanupID.
+ id uint64
}
// addCleanup attaches a cleanup function to the object. Multiple
// cleanups are allowed on an object, and even the same pointer.
-func addCleanup(p unsafe.Pointer, f *funcval) {
+// A cleanup id is returned which can be used to uniquely identify
+// the cleanup.
+func addCleanup(p unsafe.Pointer, f *funcval) uint64 {
lock(&mheap_.speciallock)
s := (*specialCleanup)(mheap_.specialCleanupAlloc.alloc())
+ // Draw a fresh id under speciallock so ids are unique across all
+ // cleanups; Cleanup.Stop matches on this id.
+ mheap_.cleanupID++
+ id := mheap_.cleanupID
unlock(&mheap_.speciallock)
s.special.kind = _KindSpecialCleanup
s.fn = f
+ s.id = id
mp := acquirem()
addspecial(p, &s.special, true)
// special isn't part of the GC'd heap.
scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
}
+ return id
}
// The described object has a weak pointer.