From: Cherry Mui
Date: Wed, 4 Oct 2023 14:21:39 +0000 (-0400)
Subject: runtime: unlock OS thread after cgocallbackg1
X-Git-Tag: go1.22rc1~684
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=68a12a80235fae67dc64fa2d232186a2e85c05f5;p=gostls13.git

runtime: unlock OS thread after cgocallbackg1

For cgo callbacks, currently cgocallbackg locks the OS thread and
then calls cgocallbackg1, which invokes the actual callback and
then unlocks the OS thread in a deferred call. cgocallback then
continues assuming we are on the same M. This assumes there is no
preemption point between the deferred unlockOSThread and returning
to the caller (cgocallbackg). But this is not always true. E.g.
when open-coded defers are not used (e.g. PIE or shared build mode
on 386), there is a preemption point in deferreturn after invoking
the deferred function (when it checks whether there are still
defers to run).

Instead of relying on and requiring the defer implementation to
have no preemption point, we move the unlockOSThread to the caller,
ensuring no preemption by setting incgo to true before unlocking.

This doesn't cover the panicking path, so we also add an
unlockOSThread there. There we don't need to worry about
preemption, because we're panicking out of the callback and we
have unwound the g0 stack, instead of reentering cgo.

Fixes #62102.

Change-Id: I0e0b9f9091be88d01675c0acb7339b81402545be
Reviewed-on: https://go-review.googlesource.com/c/go/+/532615
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Ian Lance Taylor
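To illustrate the pattern at issue, here is a minimal sketch using the
public runtime.LockOSThread/UnlockOSThread API; the pinned helper and
its callback are illustrative names, not code from this CL. It mirrors
the old shape, where the callee unlocks in a deferred call, so once the
deferred unlock has run the scheduler is free to move the goroutine to
another OS thread:

    package main

    import (
        "fmt"
        "runtime"
    )

    // pinned mirrors the old cgocallbackg/cgocallbackg1 shape: lock
    // the thread, run the callback, and unlock in a deferred call.
    // Once the deferred UnlockOSThread has run, the scheduler is free
    // to move this goroutine to another OS thread, so nothing after
    // that point (including deferreturn's own preemption check) may
    // assume it is still on the same thread.
    func pinned(callback func()) {
        runtime.LockOSThread()
        defer runtime.UnlockOSThread()
        callback()
    }

    func main() {
        pinned(func() {
            fmt.Println("running with the goroutine locked to its OS thread")
        })
        // Back here there is no guarantee of being on the same OS thread.
    }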
---

diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index e4da34b31d..73efd28491 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -266,7 +266,7 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
     // Don't use these bounds if they don't contain SP. Perhaps we
     // were called by something not using the standard thread
     // stack.
-    if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] {
+    if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] {
         g0.stack.lo = bounds[0]
         g0.stack.hi = bounds[1]
     }
@@ -291,7 +291,8 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
     // The call from C is on gp.m's g0 stack, so we must ensure
     // that we stay on that M. We have to do this before calling
     // exitsyscall, since it would otherwise be free to move us to
-    // a different M. The call to unlockOSThread is in unwindm.
+    // a different M. The call to unlockOSThread is in this function
+    // after cgocallbackg1, or in the case of panicking, in unwindm.
     lockOSThread()
 
     checkm := gp.m
@@ -318,13 +319,14 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
         panic("runtime: function marked with #cgo nocallback called back into Go")
     }
 
-    cgocallbackg1(fn, frame, ctxt) // will call unlockOSThread
+    cgocallbackg1(fn, frame, ctxt)
 
-    // At this point unlockOSThread has been called.
+    // At this point we're about to call unlockOSThread.
     // The following code must not change to a different m.
     // This is enforced by checking incgo in the schedule function.
-
     gp.m.incgo = true
+    unlockOSThread()
+
     if gp.m.isextra {
         gp.m.isExtraInC = true
     }
@@ -344,10 +346,6 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
 func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) {
     gp := getg()
 
-    // When we return, undo the call to lockOSThread in cgocallbackg.
-    // We must still stay on the same m.
-    defer unlockOSThread()
-
     if gp.m.needextram || extraMWaiters.Load() > 0 {
         gp.m.needextram = false
         systemstack(newextram)
@@ -432,6 +430,14 @@ func unwindm(restore *bool) {
             osPreemptExtExit(mp)
         }
 
+        // Undo the call to lockOSThread in cgocallbackg, only on the
+        // panicking path. In normal return case cgocallbackg will call
+        // unlockOSThread, ensuring no preemption point after the unlock.
+        // Here we don't need to worry about preemption, because we're
+        // panicking out of the callback and unwinding the g0 stack,
+        // instead of reentering cgo (which requires the same thread).
+        unlockOSThread()
+
         releasem(mp)
     }
 }
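For comparison, the following stand-in model sketches the ordering
after this change. lockOSThread, unlockOSThread, and incgo here are
local stubs named after the runtime internals; the sketch shows only
the reordering of the unlock relative to the incgo store, not the real
scheduler interaction:

    package main

    import "fmt"

    // Stand-in stubs named after the runtime internals; this models
    // the ordering after the change, not the runtime code itself.
    var incgo bool

    func lockOSThread()   { fmt.Println("lock OS thread") }
    func unlockOSThread() { fmt.Println("unlock OS thread") }

    // The callee now only invokes the Go callback; with no deferred
    // unlock, there is no deferreturn preemption point on the way out.
    func callee(callback func()) {
        callback()
    }

    // The caller unlocks the thread itself, after setting incgo: the
    // scheduler checks incgo and will not move the goroutine to a
    // different M, so there is no window between the unlock and
    // returning to C.
    func caller(callback func()) {
        lockOSThread()
        callee(callback)
        incgo = true // modeled: forbids migration from here on
        unlockOSThread()
    }

    func main() {
        caller(func() { fmt.Println("Go callback body") })
    }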