// Add an initial entry to bias len(m.dirty) above the miss count.
m.Store(nil, struct{}{})
- var finalized uint32
+ var cleanedUp uint32
- // Set finalizers that count for collected keys. A non-zero count
+ // Add cleanups that count collected keys. A non-zero count
// indicates that keys have not been leaked.
- for atomic.LoadUint32(&finalized) == 0 {
+ for atomic.LoadUint32(&cleanedUp) == 0 {
p := new(int)
- runtime.SetFinalizer(p, func(*int) {
- atomic.AddUint32(&finalized, 1)
- })
+ runtime.AddCleanup(p, func(c *uint32) { atomic.AddUint32(c, 1) }, &cleanedUp)
m.Store(p, struct{}{})
m.Delete(p)
runtime.GC()
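This first hunk shows the core of the migration: where the SetFinalizer closure captured `finalized`, AddCleanup threads the counter through its explicit third argument, so the cleanup function captures nothing. That matters because, per the AddCleanup docs, if the object is reachable from the cleanup function or its argument it will never be collected and the cleanup will never run. A minimal standalone sketch of the same counting loop; `main` and the GC/sleep pacing are illustrative, not part of the test:

```go
package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"time"
)

func main() {
	var cleanedUp uint32
	for atomic.LoadUint32(&cleanedUp) == 0 {
		p := new(int)
		// State flows through AddCleanup's arg parameter rather than a
		// closure; a cleanup that could reach p would never run.
		runtime.AddCleanup(p, func(c *uint32) { atomic.AddUint32(c, 1) }, &cleanedUp)
		runtime.GC()
		// Cleanups run on a separate goroutine after collection, so
		// give the queue a moment to drain before rechecking.
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Println("cleanups run:", atomic.LoadUint32(&cleanedUp))
}
```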
t.Run(n, func(t *testing.T) {
buf := make([]byte, 1024)
var gc atomic.Bool
- runtime.SetFinalizer(&buf[0], func(_ *byte) {
- gc.Store(true)
- })
+ runtime.AddCleanup(&buf[0], func(g *atomic.Bool) { g.Store(true) }, &gc)
f := fn(buf)
gcwaitfin()
- if gc.Load() != false {
+ if gc.Load() {
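The second hunk uses the same arg-threading to flip an `atomic.Bool` when the buffer's backing array is collected, letting the test detect a premature collection while `fn` still uses `buf`. A self-contained sketch of that detection pattern, under two stated assumptions: `use` is a hypothetical stand-in for the test's `fn`, and `gcwaitfin` (presumably the file's GC-then-wait helper) is replaced by a plain `runtime.GC` with `runtime.KeepAlive` pinning the buffer:

```go
package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

// use stands in for the test's fn: any code that must see buf while the
// buffer is still live.
//
//go:noinline
func use(b []byte) byte { return b[0] }

func main() {
	var gc atomic.Bool
	buf := make([]byte, 1024)
	// The cleanup fires only once the backing array becomes unreachable.
	runtime.AddCleanup(&buf[0], func(g *atomic.Bool) { g.Store(true) }, &gc)

	_ = use(buf)
	runtime.GC()
	// buf is kept reachable below, so the cleanup cannot have run yet.
	fmt.Println("collected early:", gc.Load())
	runtime.KeepAlive(buf)
}
```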
if try == 1 && testing.Short() {
break
}
- var fin, fin1 uint32
+ var cln, cln1 uint32
for i := 0; i < N; i++ {
v := new(string)
- runtime.SetFinalizer(v, func(vv *string) {
- atomic.AddUint32(&fin, 1)
- })
+ runtime.AddCleanup(v, func(f *uint32) { atomic.AddUint32(f, 1) }, &cln)
p.Put(v)
}
if drain {
runtime.GC()
time.Sleep(time.Duration(i*100+10) * time.Millisecond)
// 1 pointer can remain on stack or elsewhere
- if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
+ if cln1 = atomic.LoadUint32(&cln); cln1 >= N-1 {
continue loop
}
}
- t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
+ t.Fatalf("only %v out of %v resources are cleaned up on try %v", cln1, N, try)
}
}
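The pool hunk applies the same closure-to-arg rewrite and renames the `fin` counters to cleanup vocabulary, keeping the old tolerance of one straggler (`cln1 >= N-1`), since a pointer to the last value can survive on a stack. One API difference worth noting: AddCleanup returns a `runtime.Cleanup` handle whose `Stop` method cancels the cleanup, replacing the old `SetFinalizer(p, nil)` idiom; none of these hunks need it. A self-contained sketch of the drain-and-count loop, with `N` and the retry count chosen only for illustration:

```go
package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"time"
)

func main() {
	const N = 100 // illustrative; the test's N comes from its harness
	var cln uint32
	for i := 0; i < N; i++ {
		v := new(string)
		runtime.AddCleanup(v, func(c *uint32) { atomic.AddUint32(c, 1) }, &cln)
	}
	// Drain: repeated GC cycles with a growing back-off until at least
	// N-1 cleanups have run (one pointer can remain on a stack).
	for i := 0; i < 5; i++ {
		runtime.GC()
		time.Sleep(time.Duration(i*100+10) * time.Millisecond)
		if n := atomic.LoadUint32(&cln); n >= N-1 {
			fmt.Printf("%d of %d resources cleaned up\n", n, N)
			return
		}
	}
	fmt.Printf("only %d of %d resources cleaned up\n", atomic.LoadUint32(&cln), N)
}
```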