e, ok = read.m[key]
if !ok && read.amended {
e, ok = m.dirty[key]
+ delete(m.dirty, key)
+ // Regardless of whether the entry was present, record a miss: this key
+ // will take the slow path until the dirty map is promoted to the read
+ // map.
+ m.missLocked()
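
For context, the miss accounting that the new comment refers to lives in missLocked, which promotes the dirty map to the read map once the miss count catches up with len(m.dirty). A rough sketch of that helper as it stands in sync/map.go, paraphrased here for review context rather than as part of this change:

func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	// Promote: the dirty map wholesale becomes the new read-only map.
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

Deleting the key from m.dirty is what lets the key itself become unreachable; recording the miss keeps the promotion bookkeeping consistent with the other slow-path operations.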
"reflect"
"runtime"
"sync"
+ "sync/atomic"
"testing"
"testing/quick"
)
}
}
}
+
+func TestIssue40999(t *testing.T) {
+ var m sync.Map
+
+ // Since the miss-counting in missLocked (via Delete)
+ // compares the miss count with len(m.dirty),
+ // add an initial entry to bias len(m.dirty) above the miss count.
+ m.Store(nil, struct{}{})
+
+ var finalized uint32
+
+ // Set finalizers that count for collected keys. A non-zero count
+ // indicates that keys have not been leaked.
+ for atomic.LoadUint32(&finalized) == 0 {
+ p := new(int)
+ runtime.SetFinalizer(p, func(*int) {
+ atomic.AddUint32(&finalized, 1)
+ })
+ m.Store(p, struct{}{})
+ m.Delete(p)
+ runtime.GC()
+ }
+}
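
A brief note on the test's mechanics: as its first comment explains, the initial nil entry keeps len(m.dirty) ahead of the miss count, so deleting p does not immediately promote the dirty map; the leak under test lived in exactly that window, where a deleted key could linger in m.dirty indefinitely. Collection is then observed through finalizers, and the loop only terminates once at least one key has provably been collected. A self-contained sketch of that detection idiom on its own, outside sync.Map (illustrative only, not part of this CL):

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

func main() {
	var collected uint32
	// Allocate objects, drop every reference, and spin until the GC has
	// demonstrably collected one of them (i.e. its finalizer has run).
	for atomic.LoadUint32(&collected) == 0 {
		p := new(int)
		runtime.SetFinalizer(p, func(*int) {
			atomic.AddUint32(&collected, 1)
		})
		runtime.GC() // finalizers run asynchronously after the object dies
	}
	fmt.Println("collected")
}

If the fix above is reverted, the sync.Map in TestIssue40999 keeps a reference to every deleted key in m.dirty, no finalizer ever runs, and the test loops until the test binary times out.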