//
//go:nosplit
func debugCallWrap(dispatch uintptr) {
- var lockedm bool
var lockedExt uint32
callerpc := getcallerpc()
gp := getg()
+ // Lock ourselves to the OS thread.
+ //
+ // Debuggers rely on us running on the same thread until we get to
+ // dispatch the function they asked us to.
+ //
+ // We're going to transfer this lock to the new G we create below.
+ lockOSThread()
+
// Create a new goroutine to execute the call on. Run this on
// the system stack to avoid growing our stack.
systemstack(func() {
}
newg.param = unsafe.Pointer(args)
- // If the current G is locked, then transfer that
- // locked-ness to the new goroutine.
- if gp.lockedm != 0 {
- // Save lock state to restore later.
- mp := gp.m
- if mp != gp.lockedm.ptr() {
- throw("inconsistent lockedm")
- }
-
- lockedm = true
- lockedExt = mp.lockedExt
-
- // Transfer external lock count to internal so
- // it can't be unlocked from the debug call.
- mp.lockedInt++
- mp.lockedExt = 0
-
- mp.lockedg.set(newg)
- newg.lockedm.set(mp)
- gp.lockedm = 0
+ // Transfer locked-ness to the new goroutine.
+ // Save lock state to restore later.
+ mp := gp.m
+ if mp != gp.lockedm.ptr() {
+ throw("inconsistent lockedm")
}
+ // Save the external lock count and clear it so
+ // that it can't be unlocked from the debug call.
+ // Note: we already locked internally to the thread,
+ // so if we were locked before, we're still locked now.
+ lockedExt = mp.lockedExt
+ mp.lockedExt = 0
+
+ mp.lockedg.set(newg)
+ newg.lockedm.set(mp)
+ gp.lockedm = 0
// Mark the calling goroutine as being at an async
// safe-point, since it has a few conservative frames
// We'll resume here when the call returns.
// Restore locked state.
- if lockedm {
- mp := gp.m
- mp.lockedExt = lockedExt
- mp.lockedInt--
- mp.lockedg.set(gp)
- gp.lockedm.set(mp)
- }
+ mp := gp.m
+ mp.lockedExt = lockedExt
+ mp.lockedg.set(gp)
+ gp.lockedm.set(mp)
+
+ // Undo the lockOSThread we did earlier.
+ unlockOSThread()
gp.asyncSafePoint = false
}
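
The "we already locked internally to the thread" note relies on OS-thread locks nesting: the unconditional lockOSThread added above keeps the M wired to its thread on its own, so parking the external lock count (lockedExt) to one side releases nothing. Below is a minimal sketch of that nesting rule, using the exported runtime.LockOSThread/UnlockOSThread API as a stand-in for the runtime-internal lockOSThread bookkeeping the patch manipulates; it is an analogy, not the patch's code.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	runtime.LockOSThread() // outer lock: the goroutine is now wired to this OS thread
	runtime.LockOSThread() // nested lock: calls are counted, not replaced

	// One unlock undoes only one lock, so the goroutine is still wired
	// to the same OS thread at this point.
	runtime.UnlockOSThread()

	// The second unlock balances the remaining lock; only now may the
	// goroutine migrate between OS threads again.
	runtime.UnlockOSThread()

	fmt.Println("locks and unlocks balanced")
}

Because the internal lock taken in the patch is counted separately from lockedExt, zeroing lockedExt means an UnlockOSThread issued from inside the dispatched call cannot unpin the thread, which matches what the patch's own comment promises.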