Cypherpunks repositories - gostls13.git/commitdiff
runtime: perform debug call injection on a new goroutine
author: Austin Clements <austin@google.com>
Wed, 15 Apr 2020 19:38:00 +0000 (15:38 -0400)
committer: Austin Clements <austin@google.com>
Wed, 29 Apr 2020 21:29:11 +0000 (21:29 +0000)
Currently, when a debugger injects a call, that call happens on the
goroutine where the debugger injected it. However, this requires
significant runtime complexity that we're about to remove.

To prepare for this, this CL switches to a different approach that
leaves the interrupted goroutine parked and runs the debug call on a
new goroutine. When the debug call returns, it resumes the original
goroutine.

This should be essentially transparent to debuggers. It follows the
exact same call injection protocol and ensures the whole protocol
executes indivisibly on a single OS thread. The only difference is
that the current G and stack now change part way through the protocol.

For #36365.

Change-Id: I68463bfd73cbee06cfc49999606410a59dd8f653
Reviewed-on: https://go-review.googlesource.com/c/go/+/229299
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
src/runtime/debugcall.go
src/runtime/export_debug_test.go
src/runtime/runtime2.go

index f03d2358ebb2682a1a0c5c25e274ab698dd6be82..0644f71aafcb5c91222ab71865b8646bad5d3ef8 100644 (file)
@@ -95,9 +95,129 @@ func debugCallCheck(pc uintptr) string {
        return ret
 }
 
-// debugCallWrap pushes a defer to recover from panics in debug calls
-// and then calls the dispatching function at PC dispatch.
+// debugCallWrap starts a new goroutine to run a debug call and blocks
+// the calling goroutine. On the goroutine, it prepares to recover
+// panics from the debug call, and then calls the call dispatching
+// function at PC dispatch.
 func debugCallWrap(dispatch uintptr) {
+       var lockedm bool
+       var lockedExt uint32
+       callerpc := getcallerpc()
+       gp := getg()
+
+       // Create a new goroutine to execute the call on. Run this on
+       // the system stack to avoid growing our stack.
+       systemstack(func() {
+               var args struct {
+                       dispatch uintptr
+                       callingG *g
+               }
+               args.dispatch = dispatch
+               args.callingG = gp
+               fn := debugCallWrap1
+               newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), unsafe.Pointer(&args), int32(unsafe.Sizeof(args)), gp, callerpc)
+
+               // If the current G is locked, then transfer that
+               // locked-ness to the new goroutine.
+               if gp.lockedm != 0 {
+                       // Save lock state to restore later.
+                       mp := gp.m
+                       if mp != gp.lockedm.ptr() {
+                               throw("inconsistent lockedm")
+                       }
+
+                       lockedm = true
+                       lockedExt = mp.lockedExt
+
+                       // Transfer external lock count to internal so
+                       // it can't be unlocked from the debug call.
+                       mp.lockedInt++
+                       mp.lockedExt = 0
+
+                       mp.lockedg.set(newg)
+                       newg.lockedm.set(mp)
+                       gp.lockedm = 0
+               }
+
+               // Stash newg away so we can execute it below (mcall's
+               // closure can't capture anything).
+               gp.schedlink.set(newg)
+       })
+
+       // Switch to the new goroutine.
+       mcall(func(gp *g) {
+               // Get newg.
+               newg := gp.schedlink.ptr()
+               gp.schedlink = 0
+
+               // Park the calling goroutine.
+               gp.waitreason = waitReasonDebugCall
+               if trace.enabled {
+                       traceGoPark(traceEvGoBlock, 1)
+               }
+               casgstatus(gp, _Grunning, _Gwaiting)
+               dropg()
+
+               // Directly execute the new goroutine. The debug
+               // protocol will continue on the new goroutine, so
+               // it's important we not just let the scheduler do
+               // this or it may resume a different goroutine.
+               execute(newg, true)
+       })
+
+       // We'll resume here when the call returns.
+
+       // Restore locked state.
+       if lockedm {
+               mp := gp.m
+               mp.lockedExt = lockedExt
+               mp.lockedInt--
+               mp.lockedg.set(gp)
+               gp.lockedm.set(mp)
+       }
+}
+
+// debugCallWrap1 is the continuation of debugCallWrap on the callee
+// goroutine.
+func debugCallWrap1(dispatch uintptr, callingG *g) {
+       // Dispatch call and trap panics.
+       debugCallWrap2(dispatch)
+
+       // Resume the caller goroutine.
+       getg().schedlink.set(callingG)
+       mcall(func(gp *g) {
+               callingG := gp.schedlink.ptr()
+               gp.schedlink = 0
+
+               // Unlock this goroutine from the M if necessary. The
+               // calling G will relock.
+               if gp.lockedm != 0 {
+                       gp.lockedm = 0
+                       gp.m.lockedg = 0
+               }
+
+               // Switch back to the calling goroutine. At some point
+               // the scheduler will schedule us again and we'll
+               // finish exiting.
+               if trace.enabled {
+                       traceGoSched()
+               }
+               casgstatus(gp, _Grunning, _Grunnable)
+               dropg()
+               lock(&sched.lock)
+               globrunqput(gp)
+               unlock(&sched.lock)
+
+               if trace.enabled {
+                       traceGoUnpark(callingG, 0)
+               }
+               casgstatus(callingG, _Gwaiting, _Grunnable)
+               execute(callingG, true)
+       })
+}
+
+func debugCallWrap2(dispatch uintptr) {
+       // Call the dispatch function and trap panics.
        var dispatchF func()
        dispatchFV := funcval{dispatch}
        *(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))
index 97bb7bd62ac162246a5f8853fc324f9ce5269796..ed4242ef241e6ea70c655d5f1da2edf29bffa13a 100644 (file)
@@ -48,6 +48,9 @@ func InjectDebugCall(gp *g, fn, args interface{}, tkill func(tid int) error, ret
 
        h := new(debugCallHandler)
        h.gp = gp
+       // gp may not be running right now, but we can still get the M
+       // it will run on since it's locked.
+       h.mp = gp.lockedm.ptr()
        h.fv, h.argp, h.argSize = fv, argp, argSize
        h.handleF = h.handle // Avoid allocating closure during signal
 
@@ -86,6 +89,7 @@ func InjectDebugCall(gp *g, fn, args interface{}, tkill func(tid int) error, ret
 
 type debugCallHandler struct {
        gp      *g
+       mp      *m
        fv      *funcval
        argp    unsafe.Pointer
        argSize uintptr
@@ -102,8 +106,8 @@ type debugCallHandler struct {
 func (h *debugCallHandler) inject(info *siginfo, ctxt *sigctxt, gp2 *g) bool {
        switch h.gp.atomicstatus {
        case _Grunning:
-               if getg().m != h.gp.m {
-                       println("trap on wrong M", getg().m, h.gp.m)
+               if getg().m != h.mp {
+                       println("trap on wrong M", getg().m, h.mp)
                        return false
                }
                // Push current PC on the stack.
@@ -135,8 +139,8 @@ func (h *debugCallHandler) inject(info *siginfo, ctxt *sigctxt, gp2 *g) bool {
 
 func (h *debugCallHandler) handle(info *siginfo, ctxt *sigctxt, gp2 *g) bool {
        // Sanity check.
-       if getg().m != h.gp.m {
-               println("trap on wrong M", getg().m, h.gp.m)
+       if getg().m != h.mp {
+               println("trap on wrong M", getg().m, h.mp)
                return false
        }
        f := findfunc(uintptr(ctxt.rip()))
index 15e24c8175b38efaa513fa0c030c4a9bf6f73871..89a241911057a299ac3a20a592199611a75f5a72 100644 (file)
@@ -980,6 +980,7 @@ const (
        waitReasonWaitForGCCycle                          // "wait for GC cycle"
        waitReasonGCWorkerIdle                            // "GC worker (idle)"
        waitReasonPreempted                               // "preempted"
+       waitReasonDebugCall                               // "debug call"
 )
 
 var waitReasonStrings = [...]string{
@@ -1009,6 +1010,7 @@ var waitReasonStrings = [...]string{
        waitReasonWaitForGCCycle:        "wait for GC cycle",
        waitReasonGCWorkerIdle:          "GC worker (idle)",
        waitReasonPreempted:             "preempted",
+       waitReasonDebugCall:             "debug call",
 }
 
 func (w waitReason) String() string {