lockRankMcentral // For !go115NewMCentralImpl
lockRankSpine // For !go115NewMCentralImpl
lockRankSpanSetSpine
+ lockRankGscan
lockRankStackpool
lockRankStackLarge
lockRankDefer
// Other leaf locks
lockRankGFree
+ // Generally, hchan must be acquired before gscan. But in one specific
+ // case (in syncadjustsudogs from markroot after the g has been suspended
+ // by suspendG), we allow gscan to be acquired, and then an hchan lock. To
+ // allow this case, we use this lockRankHchanLeaf rank in
+ // syncadjustsudogs(), rather than lockRankHchan. By using this special
+ // rank, we don't allow any further locks to be acquired other than more
+ // hchan locks.
+ lockRankHchanLeaf
// Leaf locks with no dependencies, so these constants are not actually used anywhere.
// There are other architecture-dependent leaf locks as well.
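// A standalone sketch (hypothetical mini-ranks, not runtime code) of the
// lockRankHchanLeaf rule above: because hchanLeaf lists itself as its own
// allowed predecessor, further hchan locks may follow it, but nothing else.

package main

import "fmt"

type rank int

const (
	rankHchan rank = iota
	rankGscan
	rankHchanLeaf
)

// allowed[r] lists the ranks that may be held immediately before acquiring r.
var allowed = map[rank][]rank{
	rankGscan:     {rankHchan},
	rankHchanLeaf: {rankGscan, rankHchanLeaf},
}

func canAcquire(prev, next rank) bool {
	for _, p := range allowed[next] {
		if p == prev {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(canAcquire(rankGscan, rankHchanLeaf))     // true: hchan lock taken after gscan
	fmt.Println(canAcquire(rankHchanLeaf, rankHchanLeaf)) // true: more hchan locks are fine
	fmt.Println(canAcquire(rankHchanLeaf, rankGscan))     // false: nothing else may follow
}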
lockRankMcentral: "mcentral",
lockRankSpine: "spine",
lockRankSpanSetSpine: "spanSetSpine",
+ lockRankGscan: "gscan",
lockRankStackpool: "stackpool",
lockRankStackLarge: "stackLarge",
lockRankDefer: "defer",
lockRankGlobalAlloc: "globalAlloc.mutex",
- lockRankGFree: "gFree",
+ lockRankGFree: "gFree",
+ lockRankHchanLeaf: "hchanLeaf",
lockRankNewmHandoff: "newmHandoff.lock",
lockRankDebugPtrmask: "debugPtrmask.lock",
lockRankMcentral: {lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan},
lockRankSpine: {lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan},
lockRankSpanSetSpine: {lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan},
- lockRankStackpool: {lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankFin, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankRwmutexR, lockRankMcentral, lockRankSpine, lockRankSpanSetSpine},
- lockRankStackLarge: {lockRankAssistQueue, lockRankSched, lockRankItab, lockRankHchan, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankMcentral, lockRankSpanSetSpine},
+ lockRankGscan: {lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankFin, lockRankTraceBuf, lockRankTraceStrings, lockRankRoot, lockRankNotifyList, lockRankProf, lockRankGcBitsArenas, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankMcentral, lockRankSpine, lockRankSpanSetSpine},
+ lockRankStackpool: {lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankFin, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankRwmutexR, lockRankMcentral, lockRankSpine, lockRankSpanSetSpine, lockRankGscan},
+ lockRankStackLarge: {lockRankAssistQueue, lockRankSched, lockRankItab, lockRankHchan, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankMcentral, lockRankSpanSetSpine, lockRankGscan},
lockRankDefer: {},
lockRankSudog: {lockRankNotifyList, lockRankHchan},
- lockRankWbufSpans: {lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankAllg, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProf, lockRankRoot, lockRankDefer, lockRankSudog},
- lockRankMheap: {lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan, lockRankMspanSpecial, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankMcentral, lockRankStackpool, lockRankStackLarge, lockRankDefer, lockRankSudog, lockRankWbufSpans, lockRankSpanSetSpine},
+ lockRankWbufSpans: {lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankAllg, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProf, lockRankRoot, lockRankGscan, lockRankDefer, lockRankSudog},
+ lockRankMheap: {lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan, lockRankMspanSpecial, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankMcentral, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankDefer, lockRankSudog, lockRankWbufSpans, lockRankSpanSetSpine},
lockRankMheapSpecial: {lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan},
lockRankGlobalAlloc: {lockRankProf, lockRankSpine, lockRankSpanSetSpine, lockRankMheap, lockRankMheapSpecial},
- lockRankGFree: {lockRankSched},
+ lockRankGFree: {lockRankSched},
+ lockRankHchanLeaf: {lockRankGscan, lockRankHchanLeaf},
lockRankNewmHandoff: {},
lockRankDebugPtrmask: {},
// when acquiring a non-static lock.
//go:nosplit
func lockWithRank(l *mutex, rank lockRank) {
- if l == &debuglock {
- // debuglock is only used for println/printlock(). Don't do lock rank
- // recording for it, since print/println are used when printing
- // out a lock ordering problem below.
+ if l == &debuglock || l == &paniclk {
+ // debuglock is only used for println/printlock(). Don't do lock
+ // rank recording for it, since print/println are used when
+ // printing out a lock ordering problem below.
+ //
+ // paniclk has an ordering problem, since it can be acquired
+ // during a panic with any other locks held (especially if the
+ // panic is because of a directed segv), and yet also allg is
+ // acquired after paniclk in tracebackothers(). This is a genuine
+ // problem, so for now we don't do lock rank recording for paniclk
+ // either.
lock2(l)
return
}
})
}
+// acquireLockRank acquires a rank which is not associated with a mutex lock.
+//go:nosplit
+func acquireLockRank(rank lockRank) {
+ gp := getg()
+ // Log the new rank.
+ systemstack(func() {
+ i := gp.m.locksHeldLen
+ if i >= len(gp.m.locksHeld) {
+ throw("too many locks held concurrently for rank checking")
+ }
+ gp.m.locksHeld[i].rank = rank
+ gp.m.locksHeld[i].lockAddr = 0
+ gp.m.locksHeldLen++
+
+ // i is the index of the lock being acquired
+ if i > 0 {
+ checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
+ }
+ })
+}
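// A standalone model (simplified and single-threaded, with hypothetical
// names) of the bookkeeping above: a rank-only acquisition is recorded with
// a zero lockAddr, and the new rank is checked against the most recently
// held one.

package main

import "fmt"

type heldLock struct {
	rank     int
	lockAddr uintptr // 0 for rank-only acquisitions such as gscan
}

var (
	locksHeld    [10]heldLock
	locksHeldLen int
)

func acquireRank(rank int) {
	i := locksHeldLen
	if i >= len(locksHeld) {
		panic("too many locks held concurrently for rank checking")
	}
	locksHeld[i] = heldLock{rank: rank}
	locksHeldLen++
	if i > 0 && rank < locksHeld[i-1].rank {
		panic(fmt.Sprintf("lock ordering problem: rank %d after %d", rank, locksHeld[i-1].rank))
	}
}

func main() {
	acquireRank(10) // e.g. hchan
	acquireRank(20) // e.g. gscan: fine, the rank does not decrease
	fmt.Println("held ranks:", locksHeld[0].rank, locksHeld[1].rank)
}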
+
+// checkRanks checks if goroutine g, which has most recently acquired a lock
+// with rank 'prevRank', can now acquire a lock with rank 'rank'.
func checkRanks(gp *g, prevRank, rank lockRank) {
rankOK := false
- // If rank < prevRank, then we definitely have a rank error
- if prevRank <= rank {
- if rank == lockRankLeafRank {
- // If new lock is a leaf lock, then the preceding lock can
- // be anything except another leaf lock.
- rankOK = prevRank < lockRankLeafRank
- } else {
- // We've already verified the total lock ranking, but we
- // also enforce the partial ordering specified by
- // lockPartialOrder as well. Two locks with the same rank
- // can only be acquired at the same time if explicitly
- // listed in the lockPartialOrder table.
- list := lockPartialOrder[rank]
- for _, entry := range list {
- if entry == prevRank {
- rankOK = true
- break
- }
+ if rank < prevRank {
+ // If rank < prevRank, then we definitely have a rank error
+ rankOK = false
+ } else if rank == lockRankLeafRank {
+ // If new lock is a leaf lock, then the preceding lock can
+ // be anything except another leaf lock.
+ rankOK = prevRank < lockRankLeafRank
+ } else {
+ // We've now verified the total lock ranking, but we
+ // also enforce the partial ordering specified by
+ // lockPartialOrder. Two locks with the same rank
+ // can only be acquired at the same time if explicitly
+ // listed in the lockPartialOrder table.
+ list := lockPartialOrder[rank]
+ for _, entry := range list {
+ if entry == prevRank {
+ rankOK = true
+ break
}
}
}
}
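// A standalone sketch (hypothetical two-entry table) of the three cases
// above: a decreasing rank always fails, a leaf rank accepts any non-leaf
// predecessor, and otherwise prevRank must appear in rank's partial-order
// list.

package main

import "fmt"

const leafRank = 100

var partialOrder = map[int][]int{
	2: {1, 2}, // rank 2 may follow rank 1 or another rank-2 lock
}

func rankOK(prevRank, rank int) bool {
	if rank < prevRank {
		return false // definite ordering violation
	}
	if rank == leafRank {
		return prevRank < leafRank // any non-leaf predecessor is fine
	}
	for _, entry := range partialOrder[rank] {
		if entry == prevRank {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(rankOK(1, 2))        // true: listed in the table
	fmt.Println(rankOK(2, 1))        // false: rank decreased
	fmt.Println(rankOK(2, 2))        // true: same rank, explicitly allowed
	fmt.Println(rankOK(2, leafRank)) // true: leaf lock after a non-leaf lock
}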
//go:nosplit
-func lockRankRelease(l *mutex) {
- if l == &debuglock {
- // debuglock is only used for print/println. Don't do lock rank
- // recording for it, since print/println are used when printing
- // out a lock ordering problem below.
+func unlockWithRank(l *mutex) {
+ if l == &debuglock || l == &paniclk {
+ // See comment at beginning of lockWithRank.
unlock2(l)
return
}
found = true
copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
gp.m.locksHeldLen--
+ break
}
}
if !found {
})
}
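// The copy above shifts the tail of locksHeld down by one, so a mutex
// released out of LIFO order leaves no gap in the held-locks list. A
// standalone sketch of the removal idiom:

package main

import "fmt"

func main() {
	held := []string{"sched", "allg", "timers"}
	n := len(held)
	i := 1 // release "allg", even though "timers" was acquired after it
	copy(held[i:n-1], held[i+1:n])
	n--
	fmt.Println(held[:n]) // [sched timers]
}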
+// releaseLockRank releases a rank which is not associated with a mutex lock.
+//go:nosplit
+func releaseLockRank(rank lockRank) {
+ gp := getg()
+ systemstack(func() {
+ found := false
+ for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
+ if gp.m.locksHeld[i].rank == rank && gp.m.locksHeld[i].lockAddr == 0 {
+ found = true
+ copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
+ gp.m.locksHeldLen--
+ break
+ }
+ }
+ if !found {
+ println(gp.m.procid, ":", rank.String(), rank)
+ throw("lockRank release without matching lockRank acquire")
+ }
+ })
+}
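// A standalone sketch of the intended pairing: acquireLockRank and
// releaseLockRank bracket a critical section guarded by something other
// than a mutex, as with the gscan rank above, and the release scans from
// the top of the held list for the matching rank-only entry.

package main

import "fmt"

type heldLock struct {
	rank     int
	lockAddr uintptr // 0 marks a rank-only entry
}

var locksHeld []heldLock

func acquireRank(rank int) {
	locksHeld = append(locksHeld, heldLock{rank: rank})
}

func releaseRank(rank int) {
	for i := len(locksHeld) - 1; i >= 0; i-- {
		if locksHeld[i].rank == rank && locksHeld[i].lockAddr == 0 {
			locksHeld = append(locksHeld[:i], locksHeld[i+1:]...)
			return
		}
	}
	panic("lockRank release without matching lockRank acquire")
}

func main() {
	acquireRank(20) // e.g. gscan, taken once the g has been suspended
	// ... adjust sudogs, scan the stack ...
	releaseRank(20)
	fmt.Println("locks still held:", len(locksHeld)) // 0
}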
+
//go:nosplit
func lockWithRankMayAcquire(l *mutex, rank lockRank) {
gp := getg()