package runtime
import (
+ "internal/abi"
"internal/cpu"
"internal/goarch"
"internal/runtime/atomic"
// the lock.
cleanupID uint64
+ _ cpu.CacheLinePad
+
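+ // immortalWeakHandles is a map of weak handles for objects in immortal
+ // memory, such as linker-allocated data, which is never freed.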
+ immortalWeakHandles immortalWeakHandleMap
+
unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
}
// even if it's just some random span.
span := spanOfHeap(p)
if span == nil {
- // The span probably got swept and released.
+ // If it's immortal, then just return the pointer.
+ //
+ // Stay non-preemptible so the GC can't see us convert this potentially
+ // completely bogus value to an unsafe.Pointer.
+ if isGoPointerWithoutSpan(unsafe.Pointer(p)) {
+ releasem(mp)
+ return unsafe.Pointer(p)
+ }
+ // It's heap-allocated, so the span probably just got swept and released.
releasem(mp)
return nil
}
func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
span := spanOfHeap(uintptr(p))
if span == nil {
+ if isGoPointerWithoutSpan(p) {
+ return mheap_.immortalWeakHandles.getOrAdd(uintptr(p))
+ }
throw("getWeakHandle on invalid pointer")
}
return handle
}
+type immortalWeakHandleMap struct {
+ root atomic.UnsafePointer // *immortalWeakHandle (can't use generics because it's notinheap)
+}
+
+// immortalWeakHandle is a node in immortalWeakHandleMap, a lock-free
+// append-only hash-trie.
+//
+// Key features:
+// - 2-ary trie. Child nodes are indexed by the highest remaining bit of the hash of the address.
+// - New nodes are placed at the first empty level encountered.
+// - When the first child is added to a node, the existing value is not moved into a child.
+// This means that we must check the value at each level, not just at the leaf.
+// - No deletion or rebalancing.
+// - Intentionally devolves into a linked list on hash collisions (the hash bits will all
+// get shifted out during iteration, and new nodes will just be appended to the 0th child).
+type immortalWeakHandle struct {
+ _ sys.NotInHeap
+
+ children [2]atomic.UnsafePointer // *immortalWeakHandle (can't use generics because it's notinheap)
+ ptr uintptr // &ptr is the weak handle
+}
+
+// handle returns a canonical weak handle.
+func (h *immortalWeakHandle) handle() *atomic.Uintptr {
+ // N.B. Since we just need an *atomic.Uintptr that never changes, we can trivially
+ // reference ptr to save on some memory in immortalWeakHandle and avoid extra atomics
+ // in getOrAdd.
+ return (*atomic.Uintptr)(unsafe.Pointer(&h.ptr))
+}
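+
+// Illustrative sketch (not load-bearing code): the immortal fast path in the
+// strong-pointer conversion above relies on the invariant that an immortal
+// handle always holds the object's address and is never cleared:
+//
+//	h := mheap_.immortalWeakHandles.getOrAdd(uintptr(p))
+//	h.Load() == uintptr(p) // always true; nothing ever stores to h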
+
+// getOrAdd returns the canonical weak handle for p, which must be a pointer to
+// immortal memory (for example, a linker-allocated object), inserting a new
+// handle into the map if one does not already exist. The returned handle is
+// never cleared.
+func (tab *immortalWeakHandleMap) getOrAdd(p uintptr) *atomic.Uintptr {
+ var newNode *immortalWeakHandle
+ m := &tab.root
+ hash := memhash(abi.NoEscape(unsafe.Pointer(&p)), 0, goarch.PtrSize)
+ hashIter := hash
+ for {
+ n := (*immortalWeakHandle)(m.Load())
+ if n == nil {
+ // Try to insert a new map node. We may end up discarding
+ // this node if we fail to insert because it turns out the
+ // value is already in the map.
+ //
+ // The discard will only happen if two threads race on inserting
+ // the same value. Both might create nodes, but only one will
+ // succeed on insertion. If two threads race to insert two
+ // different values, then both nodes will *always* get inserted,
+ // because the equality checking below will always fail.
+ //
+ // Performance note: contention on insertion is likely to be
+ // higher for small maps, but since this data structure is
+ // append-only, either the map stays small because there isn't
+ // much activity, or the map gets big and races to insert on
+ // the same node are much less likely.
+ if newNode == nil {
+ newNode = (*immortalWeakHandle)(persistentalloc(unsafe.Sizeof(immortalWeakHandle{}), goarch.PtrSize, &memstats.gcMiscSys))
+ newNode.ptr = p
+ }
+ if m.CompareAndSwapNoWB(nil, unsafe.Pointer(newNode)) {
+ return newNode.handle()
+ }
+ // Reload n. Because pointers are only stored once,
+ // we must have lost the race, and therefore n is not nil
+ // anymore.
+ n = (*immortalWeakHandle)(m.Load())
+ }
+ if n.ptr == p {
+ return n.handle()
+ }
+ m = &n.children[hashIter>>(8*goarch.PtrSize-1)]
+ hashIter <<= 1
+ }
+}
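+
+// An illustrative walk (sketch only, assuming 64-bit pointers): for a hash of
+// 0b1011<<60, getOrAdd descends
+//
+//	level 0: hashIter>>63 == 1 -> children[1], then hashIter <<= 1
+//	level 1: hashIter>>63 == 0 -> children[0], then hashIter <<= 1
+//	level 2: hashIter>>63 == 1 -> children[1], and so on.
+//
+// Once every hash bit has been shifted out, hashIter is 0 and each further
+// step selects children[0], which is the linked-list degradation described
+// above.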
+
// The described object is being heap profiled.
type specialprofile struct {
_ sys.NotInHeap
// in a tiny block making the tests in this package flaky.
t *T
a int
+ b int
}
func TestPointer(t *testing.T) {
t.Fatal("failed to look up special and made duplicate weak handle; see issue #70739")
}
}
+
+var immortal T
+
+func TestImmortalPointer(t *testing.T) {
+ w0 := weak.Make(&immortal)
+ if weak.Make(&immortal) != w0 {
+ t.Error("immortal weak pointers to the same pointer not equal")
+ }
+ w0a := weak.Make(&immortal.a)
+ w0b := weak.Make(&immortal.b)
+ if w0a == w0b {
+ t.Error("separate immortal pointers (same object) have the same pointer")
+ }
+ if got, want := w0.Value(), &immortal; got != want {
+ t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
+ }
+ if got, want := w0a.Value(), &immortal.a; got != want {
+ t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
+ }
+ if got, want := w0b.Value(), &immortal.b; got != want {
+ t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
+ }
+
+ // Run a couple of cycles.
+ runtime.GC()
+ runtime.GC()
+
+ // Immortal weak pointers should never be cleared, even across GC cycles.
+ if got, want := w0.Value(), &immortal; got != want {
+ t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
+ }
+ if got, want := w0a.Value(), &immortal.a; got != want {
+ t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
+ }
+ if got, want := w0b.Value(), &immortal.b; got != want {
+ t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
+ }
+}