From 6d7cb594b358b9b22709fb7a7940abc4c9778074 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Fri, 14 Feb 2025 18:39:29 +0000
Subject: [PATCH] weak: accept linker-allocated objects to Make

Currently Make panics when passed a linker-allocated object. This is
inconsistent with both runtime.AddCleanup and runtime.SetFinalizer.
Not panicking in this case is important so that all pointers can be
treated equally by these APIs. Libraries should not have to worry
about where a pointer came from in order to make weak pointers.

Supporting this behavior is a bit more complex for weak pointers than
for finalizers and cleanups. For the latter two, it simply means a
function is never called, so we can drop everything on the floor. For
weak pointers, we still need to produce pointers that compare equal
as required by the API.

To do this, copy the tiny lock-free trace map implementation and use
it to store weak handles for "immortal" objects. These paths in the
runtime should be rare, so it's OK if the map is not incredibly fast,
but we should keep the memory footprint relatively low (at least no
worse than specials), so this change tweaks the map implementation a
little bit to ensure that's the case.
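For example, here is a sketch of the user-visible behavior this change
enables (it mirrors the new TestImmortalPointer below; the program and
its names are illustrative, not part of the patch):

	package main

	import "weak"

	// global is linker-allocated: it lives in .data/.bss, not the heap.
	var global int

	func main() {
		w := weak.Make(&global) // previously panicked; now succeeds
		// Weak pointers to the same immortal object always compare
		// equal, and Value never reports the object as reclaimed.
		if w != weak.Make(&global) || w.Value() != &global {
			panic("unreachable")
		}
	}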
Fixes #71726.

Change-Id: I0c87c9d90656d81659ac8d70f511773d0093ce27
Reviewed-on: https://go-review.googlesource.com/c/go/+/649460
Reviewed-by: Cherry Mui
Auto-Submit: Michael Knyszek
LUCI-TryBot-Result: Go LUCI
---
 src/runtime/mheap.go     | 92 +++++++++++++++++++++++++++++++++++++++-
 src/weak/pointer_test.go | 39 +++++++++++++++++
 2 files changed, 130 insertions(+), 1 deletion(-)

diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 21ae5b1a3b..28ca5c3a70 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -9,6 +9,7 @@ package runtime
 
 import (
+	"internal/abi"
 	"internal/cpu"
 	"internal/goarch"
 	"internal/runtime/atomic"
	"internal/runtime/sys"
@@ -246,6 +247,10 @@ type mheap struct {
 	// the lock.
 	cleanupID uint64
 
+	_ cpu.CacheLinePad
+
+	immortalWeakHandles immortalWeakHandleMap
+
 	unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
 }
 
@@ -2138,7 +2143,15 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer {
 	// even if it's just some random span.
 	span := spanOfHeap(p)
 	if span == nil {
-		// The span probably got swept and released.
+		// If it's immortal, then just return the pointer.
+		//
+		// Stay non-preemptible so the GC can't see us convert this potentially
+		// completely bogus value to an unsafe.Pointer.
+		if isGoPointerWithoutSpan(unsafe.Pointer(p)) {
+			releasem(mp)
+			return unsafe.Pointer(p)
+		}
+		// It's heap-allocated, so the span probably just got swept and released.
 		releasem(mp)
 		return nil
 	}
@@ -2275,6 +2288,9 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
 
 func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
 	span := spanOfHeap(uintptr(p))
 	if span == nil {
+		if isGoPointerWithoutSpan(p) {
+			return mheap_.immortalWeakHandles.getOrAdd(uintptr(p))
+		}
 		throw("getWeakHandle on invalid pointer")
 	}
@@ -2303,6 +2319,80 @@
 	return handle
 }
 
+type immortalWeakHandleMap struct {
+	root atomic.UnsafePointer // *immortalWeakHandle (can't use generics because it's notinheap)
+}
+
+// immortalWeakHandle is a lock-free append-only hash-trie.
+//
+// Key features:
+//   - 2-ary trie. Child nodes are indexed by the highest (remaining) bit of the hash of the address.
+//   - New nodes are placed at the first empty level encountered.
+//   - When the first child is added to a node, the existing value is not moved into a child.
+//     This means that we must check the value at each level, not just at the leaf.
+//   - No deletion or rebalancing.
+//   - Intentionally devolves into a linked list on hash collisions (the hash bits will all
+//     get shifted out during iteration, and new nodes will just be appended to the 0th child).
+type immortalWeakHandle struct {
+	_ sys.NotInHeap
+
+	children [2]atomic.UnsafePointer // *immortalWeakHandle (can't use generics because it's notinheap)
+	ptr      uintptr                 // &ptr is the weak handle
+}
+
+// handle returns a canonical weak handle.
+func (h *immortalWeakHandle) handle() *atomic.Uintptr {
+	// N.B. Since we just need an *atomic.Uintptr that never changes, we can trivially
+	// reference ptr to save on some memory in immortalWeakHandle and avoid extra atomics
+	// in getOrAdd.
+	return (*atomic.Uintptr)(unsafe.Pointer(&h.ptr))
+}
+
+// getOrAdd introduces p, which must be a pointer to immortal memory (for example,
+// a linker-allocated object), and returns a weak handle that will never become nil.
+func (tab *immortalWeakHandleMap) getOrAdd(p uintptr) *atomic.Uintptr {
+	var newNode *immortalWeakHandle
+	m := &tab.root
+	hash := memhash(abi.NoEscape(unsafe.Pointer(&p)), 0, goarch.PtrSize)
+	hashIter := hash
+	for {
+		n := (*immortalWeakHandle)(m.Load())
+		if n == nil {
+			// Try to insert a new map node. We may end up discarding
+			// this node if we fail to insert because it turns out the
+			// value is already in the map.
+			//
+			// The discard will only happen if two threads race on inserting
+			// the same value. Both might create nodes, but only one will
+			// succeed on insertion. If two threads race to insert two
+			// different values, then both nodes will *always* get inserted,
+			// because the equality checking below will always fail.
+			//
+			// Performance note: contention on insertion is likely to be
+			// higher for small maps, but since this data structure is
+			// append-only, either the map stays small because there isn't
+			// much activity, or the map gets big and races to insert on
+			// the same node are much less likely.
+			if newNode == nil {
+				newNode = (*immortalWeakHandle)(persistentalloc(unsafe.Sizeof(immortalWeakHandle{}), goarch.PtrSize, &memstats.gcMiscSys))
+				newNode.ptr = p
+			}
+			if m.CompareAndSwapNoWB(nil, unsafe.Pointer(newNode)) {
+				return newNode.handle()
+			}
+			// Reload n. Because pointers are only stored once,
+			// we must have lost the race, and therefore n is not nil
+			// anymore.
+			n = (*immortalWeakHandle)(m.Load())
+		}
+		if n.ptr == p {
+			return n.handle()
+		}
+		m = &n.children[hashIter>>(8*goarch.PtrSize-1)]
+		hashIter <<= 1
+	}
+}
+
 // The described object is being heap profiled.
 type specialprofile struct {
 	_ sys.NotInHeap
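The map above is runtime-internal: its nodes are notinheap, come from
persistentalloc, and are hashed with memhash, none of which exist outside
the runtime. The following is a minimal user-land sketch of the same
append-only hash-trie shape, substituting hash/maphash and sync/atomic
for those internals (assumes Go 1.24+ for maphash.Comparable; illustrative
only, not part of the patch):

	package main

	import (
		"fmt"
		"hash/maphash"
		"sync/atomic"
	)

	var seed = maphash.MakeSeed()

	// node plays the role of immortalWeakHandle: one key per node,
	// two children indexed by the top remaining hash bit.
	type node struct {
		children [2]atomic.Pointer[node]
		key      uintptr
	}

	type trie struct {
		root atomic.Pointer[node]
	}

	// getOrAdd returns the canonical node for key, inserting a new
	// node at the first empty level if the key is not yet present.
	func (t *trie) getOrAdd(key uintptr) *node {
		var fresh *node
		m := &t.root
		hash := maphash.Comparable(seed, key)
		for {
			n := m.Load()
			if n == nil {
				if fresh == nil {
					fresh = &node{key: key}
				}
				if m.CompareAndSwap(nil, fresh) {
					return fresh
				}
				// Lost an insertion race; the slot is non-nil now.
				n = m.Load()
			}
			// Keys are checked at every level, not just at the leaves,
			// because an existing node is never moved into a child.
			if n.key == key {
				return n
			}
			// Descend by the top remaining hash bit, then shift it out.
			// On a total collision this walks the 0th children like a list.
			m = &n.children[hash>>63]
			hash <<= 1
		}
	}

	func main() {
		var t trie
		a := t.getOrAdd(42)
		b := t.getOrAdd(42)
		fmt.Println(a == b) // true: one canonical node per key
	}

Because nodes are never deleted or moved, readers need nothing beyond the
atomic loads, and a node's address (like &h.ptr in the runtime version) is
stable for the life of the program, which is exactly the property a
canonical weak handle needs.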
diff --git a/src/weak/pointer_test.go b/src/weak/pointer_test.go
index 70c743381c..d2ee651244 100644
--- a/src/weak/pointer_test.go
+++ b/src/weak/pointer_test.go
@@ -20,6 +20,7 @@ type T struct {
 	// in a tiny block making the tests in this package flaky.
 	t *T
 	a int
+	b int
 }
 
 func TestPointer(t *testing.T) {
@@ -252,3 +253,41 @@ func TestIssue70739(t *testing.T) {
 		t.Fatal("failed to look up special and made duplicate weak handle; see issue #70739")
 	}
 }
+
+var immortal T
+
+func TestImmortalPointer(t *testing.T) {
+	w0 := weak.Make(&immortal)
+	if weak.Make(&immortal) != w0 {
+		t.Error("immortal weak pointers to the same object are not equal")
+	}
+	w0a := weak.Make(&immortal.a)
+	w0b := weak.Make(&immortal.b)
+	if w0a == w0b {
+		t.Error("immortal weak pointers to distinct fields of the same object are equal")
+	}
+	if got, want := w0.Value(), &immortal; got != want {
+		t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
+	}
+	if got, want := w0a.Value(), &immortal.a; got != want {
+		t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
+	}
+	if got, want := w0b.Value(), &immortal.b; got != want {
+		t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
+	}
+
+	// Run a couple of GC cycles.
+	runtime.GC()
+	runtime.GC()
+
+	// Immortal weak pointers must never be cleared.
+	if got, want := w0.Value(), &immortal; got != want {
+		t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
+	}
+	if got, want := w0a.Value(), &immortal.a; got != want {
+		t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
+	}
+	if got, want := w0b.Value(), &immortal.b; got != want {
+		t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
+	}
+}
-- 
2.51.0
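As a final illustration of the semantics above, the sketch below contrasts
a weak pointer to a linker-allocated object with one to a heap object. It
assumes, as is typical but not guaranteed, that two forced GC cycles
suffice to clear the weak pointer to an unreachable heap object; everything
else follows directly from the patch:

	package main

	import (
		"fmt"
		"runtime"
		"weak"
	)

	type T struct{ a, b int }

	// global is linker-allocated: it lives in the data/bss segments and
	// never has a heap span, so spanOfHeap reports nil for its address.
	var global T

	func main() {
		wImmortal := weak.Make(&global)
		wHeap := weak.Make(new(T)) // heap-allocated, immediately unreachable

		runtime.GC()
		runtime.GC()

		// The immortal handle is never cleared: makeStrongFromWeak just
		// returns the stored address once isGoPointerWithoutSpan says yes.
		fmt.Println(wImmortal.Value() == &global) // always true
		// The heap object's weak pointer is cleared once the GC notices
		// the object is unreachable (typically by the second cycle here).
		fmt.Println(wHeap.Value()) // usually <nil>
	}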