From d3836aba316a6a5ee9410b9abd2f42120af4ffe6 Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Fri, 14 Oct 2016 13:39:07 -0400
Subject: [PATCH] runtime: ensure finalizers are zero-initialized before reuse

We reuse finalizers in finblocks, which are allocated off-heap. This
means they have to be zero-initialized before becoming visible to the
garbage collector. We actually already do this by clearing the
finalizer before returning it to the pool, but we're not careful to
enforce correct memory ordering. Fix this by manipulating the
finalizer count atomically so these writes synchronize properly with
the garbage collector.

Updates #17503.

Change-Id: I7797d31df3c656c9fe654bc6da287f66a9e2037d
Reviewed-on: https://go-review.googlesource.com/31454
Reviewed-by: Rick Hudson
---
 src/runtime/mfinal.go  | 15 +++++++++------
 src/runtime/mgcmark.go |  3 ++-
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index dae3956cd1..e0da4a3ac0 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -19,7 +19,7 @@ import (
 type finblock struct {
 	alllink *finblock
 	next    *finblock
-	cnt     int32
+	cnt     uint32
 	_       int32
 	fin     [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
 }
@@ -72,7 +72,7 @@ var finalizer1 = [...]byte{
 
 func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
 	lock(&finlock)
-	if finq == nil || finq.cnt == int32(len(finq.fin)) {
+	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
 		if finc == nil {
 			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
 			finc.alllink = allfin
@@ -99,7 +99,7 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
 		finq = block
 	}
 	f := &finq.fin[finq.cnt]
-	finq.cnt++
+	atomic.Xadd(&finq.cnt, +1) // Sync with markroots
 	f.fn = fn
 	f.nret = nret
 	f.fint = fint
@@ -112,7 +112,7 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
 //go:nowritebarrier
 func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
 	for fb := allfin; fb != nil; fb = fb.alllink {
-		for i := int32(0); i < fb.cnt; i++ {
+		for i := uint32(0); i < fb.cnt; i++ {
 			f := &fb.fin[i]
 			callback(f.fn, f.arg, f.nret, f.fint, f.ot)
 		}
@@ -208,11 +208,14 @@ func runfinq() {
 			reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
 			fingRunning = false
 
-			// drop finalizer queue references to finalized object
+			// Drop finalizer queue heap references
+			// before hiding them from markroot.
+			// This also ensures these will be
+			// clear if we reuse the finalizer.
 			f.fn = nil
 			f.arg = nil
 			f.ot = nil
-			fb.cnt = i - 1
+			atomic.Store(&fb.cnt, i-1)
 		}
 		next := fb.next
 		lock(&finlock)
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 022fbf24ea..edb8af25f5 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -186,7 +186,8 @@ func markroot(gcw *gcWork, i uint32) {
 
 	case i == fixedRootFinalizers:
 		for fb := allfin; fb != nil; fb = fb.alllink {
-			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), uintptr(fb.cnt)*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)
+			cnt := uintptr(atomic.Load(&fb.cnt))
+			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)
 		}
 
 	case i == fixedRootFreeGStacks:
-- 
2.48.1
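
For readers following along outside the runtime, below is a minimal,
self-contained sketch of the publication pattern this change relies on:
a writer fully initializes a slot and only then publishes it by
atomically bumping a count, while a concurrent reader atomically loads
the count and looks only at published slots. The block/slot types and
the publish/scan names are illustrative inventions, not runtime APIs;
the runtime's writer additionally holds finlock, which the sketch
approximates with a single publishing goroutine.

// finpublish_sketch.go: illustrative only; not runtime code.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type slot struct{ val int }

type block struct {
	cnt   uint32   // count of published slots; read and written atomically
	slots [64]slot // storage; only slots[:cnt] are visible to readers
}

// publish initializes a slot and then makes it visible by atomically
// incrementing cnt, mirroring the atomic.Xadd on finq.cnt in the patch.
// Like queuefinalizer (which holds finlock), it assumes a single writer.
func (b *block) publish(v int) {
	n := atomic.LoadUint32(&b.cnt)
	b.slots[n].val = v          // initialize before publishing
	atomic.AddUint32(&b.cnt, 1) // publish: readers may now observe the slot
}

// scan loads cnt atomically and visits only published slots, mirroring
// markroot's atomic.Load of fb.cnt before scanblock.
func (b *block) scan() []int {
	n := atomic.LoadUint32(&b.cnt)
	out := make([]int, 0, n)
	for i := uint32(0); i < n; i++ {
		out = append(out, b.slots[i].val)
	}
	return out
}

func main() {
	var b block
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 1; i <= 10; i++ {
			b.publish(i)
		}
	}()
	fmt.Println(b.scan()) // sees some prefix of the published values
	wg.Wait()
	fmt.Println(b.scan()) // sees all ten values
}

Run under the race detector (go run -race), this sketch reports no
races; replacing the atomic operations on cnt with plain loads and
stores will typically be flagged, which is the same ordering hazard the
patch closes between queuefinalizer/runfinq and markroot.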