From: Austin Clements
Date: Wed, 20 Jul 2022 18:12:56 +0000 (-0400)
Subject: runtime: add mayAcquire annotation for trace.lock
X-Git-Tag: go1.20rc1~1681
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=502b6057d2a30b284a3889e4439b47289244382b;p=gostls13.git

runtime: add mayAcquire annotation for trace.lock

Now that we've moved the trace locks to the leaf of the lock graph, we
can safely annotate that any trace event may acquire trace.lock, even
if dynamically it turns out a particular event doesn't need to flush
and acquire this lock.

This reveals a new edge where we can trace while holding the mheap
lock, so we add this edge to the lock graph.

For #53789.
Updates #53979.

Change-Id: I13e2f6cd1b621cca4bed0cc13ef12e64d05c89a7
Reviewed-on: https://go-review.googlesource.com/c/go/+/418720
Reviewed-by: Michael Knyszek
Run-TryBot: Austin Clements
TryBot-Result: Gopher Robot
---

diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go
index ba4dd71c71..fbfef35770 100644
--- a/src/runtime/lockrank.go
+++ b/src/runtime/lockrank.go
@@ -174,8 +174,8 @@ var lockPartialOrder [][]lockRank = [][]lockRank{
 	lockRankWbufSpans:     {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
 	lockRankMheap:         {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
 	lockRankGlobalAlloc:   {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMheapSpecial, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
-	lockRankTrace:         {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin},
-	lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankTrace},
+	lockRankTrace:         {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
+	lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
 	lockRankPanic:         {},
 	lockRankDeadlock:      {lockRankPanic, lockRankDeadlock},
 }
diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go
index 4445e8327e..9f269fd7b5 100644
--- a/src/runtime/mklockrank.go
+++ b/src/runtime/mklockrank.go
@@ -159,6 +159,7 @@ mheap, mheapSpecial < globalAlloc;
 
 # Execution tracer events (with a P)
 hchan,
+  mheap,
   root,
   sched,
   traceStrings,
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index 56fd1ba37b..4290d92240 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -883,6 +883,11 @@ func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
 
 // traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
 func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
+	// Any time we acquire a buffer, we may end up flushing it,
+	// but flushes are rare. Record the lock edge even if it
+	// doesn't happen this time.
+	lockRankMayTraceFlush()
+
 	mp = acquirem()
 	if p := mp.p.ptr(); p != nil {
 		return mp, p.id, &p.tracebuf
@@ -899,6 +904,16 @@ func traceReleaseBuffer(pid int32) {
 	releasem(getg().m)
 }
 
+// lockRankMayTraceFlush records the lock ranking effects of a
+// potential call to traceFlush.
+func lockRankMayTraceFlush() {
+	owner := trace.lockOwner
+	dolock := owner == nil || owner != getg().m.curg
+	if dolock {
+		lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
+	}
+}
+
 // traceFlush puts buf onto stack of full buffers and returns an empty buffer.
 //
 // This must run on the system stack because it acquires trace.lock.
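
For readers unfamiliar with the runtime's lock rank checker, here is a
minimal standalone sketch of the "may acquire" idea this patch relies on:
record a potential lock-order edge at the point where a lock *might* be
taken, so the checker sees the edge even on runs where the rare path (here,
a trace flush) never fires. Everything below (rank, rankedMutex,
mayAcquire, the allowed table) is a hypothetical illustration, not a
runtime API; the runtime's real mechanism is lockWithRankMayAcquire, used
by lockRankMayTraceFlush in the trace.go hunk above, and it tracks held
ranks per M rather than in a global slice.

package main

import (
	"fmt"
	"sync"
)

// rank identifies a lock's position in a static partial order.
type rank int

const (
	rankMheap rank = iota // stand-in for the heap lock
	rankTrace             // stand-in for trace.lock, ranked after mheap
)

// allowed[a][b] reports whether a lock of rank b may be acquired while
// holding a lock of rank a, i.e. the lock-graph edge a -> b. The single
// entry below models the mheap -> trace edge this commit adds.
var allowed = map[rank]map[rank]bool{
	rankMheap: {rankTrace: true},
}

// rankedMutex pairs a mutex with its rank.
type rankedMutex struct {
	mu sync.Mutex
	r  rank
}

// held tracks the ranks currently held. A single global slice is only
// adequate for this single-goroutine sketch.
var held []rank

// mayAcquire records the ordering effect of a *potential* acquisition:
// the edge is checked against every held rank even if the lock is never
// actually taken on this code path.
func mayAcquire(l *rankedMutex) {
	for _, h := range held {
		if !allowed[h][l.r] {
			panic(fmt.Sprintf("lock ordering: rank %d held, may acquire rank %d", h, l.r))
		}
	}
}

func (l *rankedMutex) lock() {
	mayAcquire(l) // a real acquisition implies the may-acquire check
	l.mu.Lock()
	held = append(held, l.r)
}

func (l *rankedMutex) unlock() {
	held = held[:len(held)-1] // assumes LIFO lock/unlock, as the sketch uses
	l.mu.Unlock()
}

func main() {
	mheapLock := &rankedMutex{r: rankMheap}
	traceLock := &rankedMutex{r: rankTrace}

	// Emitting a trace event while holding the heap lock: the event
	// rarely flushes, but we still record that it *may* take trace.lock.
	mheapLock.lock()
	mayAcquire(traceLock) // OK: the mheap -> trace edge is in the graph
	mheapLock.unlock()

	// The reverse edge is absent, so this would panic:
	//	traceLock.lock()
	//	mayAcquire(mheapLock)
}

Checking the edge on every buffer acquisition, rather than only on the
rare flush path, is what makes an ordering violation reproducible in
ordinary runs instead of surfacing only when a buffer happens to fill.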