// save syscall* and let reentersyscall restore them.
savedsp := unsafe.Pointer(gp.syscallsp)
savedpc := gp.syscallpc
+ savedbp := gp.syscallbp
exitsyscall() // coming out of cgo call
gp.m.incgo = false
if gp.m.isextra {
	gp.m.isExtraInC = false
}
osPreemptExtEnter(gp.m)
// going back to cgo call
- reentersyscall(savedpc, uintptr(savedsp))
+ reentersyscall(savedpc, uintptr(savedsp), savedbp)
gp.m.winsyscall = winsyscall
}
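// Aside: a minimal, runnable sketch of the save/restore pattern this hunk
// extends to the frame pointer. It is not runtime code; every name below is
// a hypothetical stand-in for the runtime's gp.syscall* bookkeeping. The
// point is only that exitsyscall clears the goroutine's syscall state, so
// cgocallbackg must stash pc/sp/bp and hand them back to reentersyscall.
package main

import "fmt"

type gState struct {
	syscallPC, syscallSP, syscallBP uintptr
}

func (g *gState) exitSyscall() { *g = gState{} } // clears the syscall* state

func (g *gState) reenterSyscall(pc, sp, bp uintptr) {
	g.syscallPC, g.syscallSP, g.syscallBP = pc, sp, bp
}

func callbackG(g *gState, run func()) {
	// Save syscall* and restore it afterwards, mirroring the hunk above.
	savedPC, savedSP, savedBP := g.syscallPC, g.syscallSP, g.syscallBP
	g.exitSyscall() // "coming out of cgo call"
	run()           // the Go callback runs here
	g.reenterSyscall(savedPC, savedSP, savedBP) // "going back to cgo call"
}

func main() {
	g := &gState{syscallPC: 0x1000, syscallSP: 0x2000, syscallBP: 0x3000}
	callbackG(g, func() { fmt.Println("in Go callback") })
	fmt.Printf("restored: %#x %#x %#x\n", g.syscallPC, g.syscallSP, g.syscallBP)
}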
func NewContextStub() *ContextStub {
	var ctx context
ctx.set_ip(getcallerpc())
ctx.set_sp(getcallersp())
- fp := getfp()
- // getfp is not implemented on windows/386 and windows/arm,
- // in which case it returns 0.
- if fp != 0 {
- ctx.set_fp(*(*uintptr)(unsafe.Pointer(fp)))
- }
+ ctx.set_fp(getcallerfp())
return &ContextStub{ctx}
}
//
//go:nosplit
//go:nowritebarrierrec
-func save(pc, sp uintptr) {
+func save(pc, sp, bp uintptr) {
gp := getg()
if gp == gp.m.g0 || gp == gp.m.gsignal {
	throw("save on system g not allowed")
}
gp.sched.pc = pc
gp.sched.sp = sp
gp.sched.lr = 0
gp.sched.ret = 0
+ gp.sched.bp = bp
// We need to ensure ctxt is zero, but can't have a write
// barrier here. However, it should always already be zero.
// Assert that.
if gp.sched.ctxt != nil {
	badctxt()
}
}
// entersyscall below is the normal entry point for syscalls, which obtains the SP and PC from the caller.
//
//go:nosplit
-func reentersyscall(pc, sp uintptr) {
+func reentersyscall(pc, sp, bp uintptr) {
trace := traceAcquire()
gp := getg()
gp.throwsplit = true
// Leave SP around for GC and traceback.
- save(pc, sp)
+ save(pc, sp, bp)
gp.syscallsp = sp
gp.syscallpc = pc
+ gp.syscallbp = bp
casgstatus(gp, _Grunning, _Gsyscall)
if staticLockRanking {
// When doing static lock ranking casgstatus can call
// systemstack which clobbers g.sched.
- save(pc, sp)
+ save(pc, sp, bp)
}
if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
	systemstack(func() {
		print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
		throw("entersyscall")
	})
}
if trace.ok() {
	systemstack(func() {
		trace.GoSysCall()
		traceRelease(trace)
	})
	// systemstack itself clobbers g.sched.{pc,sp} and we might
	// need them later when the G is genuinely blocked in a syscall
-	save(pc, sp)
+	save(pc, sp, bp)
}
if sched.sysmonwait.Load() {
systemstack(entersyscall_sysmon)
- save(pc, sp)
+ save(pc, sp, bp)
}
if gp.m.p.ptr().runSafePointFn != 0 {
// runSafePointFn may stack split if run on this stack
systemstack(runSafePointFn)
- save(pc, sp)
+ save(pc, sp, bp)
}
gp.m.syscalltick = gp.m.p.ptr().syscalltick
pp := gp.m.p.ptr()
pp.m = 0
gp.m.oldp.set(pp)
gp.m.p = 0
atomic.Store(&pp.status, _Psyscall)
if sched.gcwaiting.Load() {
systemstack(entersyscall_gcwait)
- save(pc, sp)
+ save(pc, sp, bp)
}
gp.m.locks--
//go:nosplit
//go:linkname entersyscall
func entersyscall() {
- reentersyscall(getcallerpc(), getcallersp())
+ // N.B. getcallerfp cannot be written directly as argument in the call
+ // to reentersyscall because it forces spilling the other arguments to
+ // the stack. This results in exceeding the nosplit stack requirements
+ // on some platforms.
+ fp := getcallerfp()
+ reentersyscall(getcallerpc(), getcallersp(), fp)
}
func entersyscallblock() {
// Leave SP around for GC and traceback.
pc := getcallerpc()
sp := getcallersp()
- save(pc, sp)
+ bp := getcallerfp()
+ save(pc, sp, bp)
gp.syscallsp = gp.sched.sp
gp.syscallpc = gp.sched.pc
+ gp.syscallbp = gp.sched.bp
if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
systemstack(entersyscallblock_handoff)
// Resave for traceback during blocked call.
- save(getcallerpc(), getcallersp())
+ save(getcallerpc(), getcallersp(), getcallerfp())
gp.m.locks--
}
sched gobuf
syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
+ syscallbp uintptr // if status==Gsyscall, syscallbp = sched.bp to use in fpTraceback
stktopsp uintptr // expected sp at top of stack, to check in traceback
// param is a generic pointer parameter field used to pass
// values in particular contexts where other storage for the
// Must agree with internal/buildcfg.FramePointerEnabled.
const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64"
+
+// getcallerfp returns the frame pointer of the caller of the caller
+// of this function. Like getfp, it returns 0 on platforms where frame
+// pointers are not supported (e.g. windows/386 and windows/arm).
+//
+//go:nosplit
+//go:noinline
+func getcallerfp() uintptr {
+ fp := getfp() // This frame's FP.
+ if fp != 0 {
+ fp = *(*uintptr)(unsafe.Pointer(fp)) // The caller's FP.
+ fp = *(*uintptr)(unsafe.Pointer(fp)) // The caller's caller's FP.
+ }
+ return fp
+}
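// Aside: the two loads above generalize to walking any number of frames,
// because the first word of each frame record is the caller's saved frame
// pointer. A hypothetical helper (not part of this change) might look like
// the sketch below; it carries the same caveats as getcallerfp, and only
// returns something useful where framepointer_enabled holds.
//
//go:nosplit
//go:noinline
func callerfpn(n int) uintptr {
	fp := getfp() // This frame's FP; 0 if getfp is unimplemented.
	for i := 0; i < n && fp != 0; i++ {
		fp = *(*uintptr)(unsafe.Pointer(fp)) // One frame up the chain.
	}
	return fp
}

// callerfpn(2) computes the same value as getcallerfp above.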
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {runtime.G{}, 268, 432}, // g, but exported for testing
+ {runtime.G{}, 272, 440}, // g, but exported for testing
{runtime.Sudog{}, 56, 88}, // sudog, but exported for testing
}
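// Consistency check for the updated sizes: syscallbp adds exactly one
// uintptr to g, growing it by 4 bytes on 32-bit platforms (268+4 = 272)
// and by 8 bytes on 64-bit platforms (432+8 = 440).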
//
// Must be called with a valid P.
func (tl traceLocker) GoSysCall() {
- var skip int
- switch {
- case tracefpunwindoff():
- // Unwind by skipping 1 frame relative to gp.syscallsp which is captured 3
- // frames above this frame. For frame pointer unwinding we produce the same
- // results by hard coding the number of frames in between our caller and the
- // actual syscall, see cases below.
- // TODO(felixge): Implement gp.syscallbp to avoid this workaround?
- skip = 1
- case GOOS == "solaris" || GOOS == "illumos":
- // These platforms don't use a libc_read_trampoline.
- skip = 3
- default:
- // Skip the extra trampoline frame used on most systems.
- skip = 4
- }
// Scribble down the M that the P is currently attached to.
pp := tl.mp.p.ptr()
pp.trace.mSyscallID = int64(tl.mp.procid)
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoSyscallBegin, pp.trace.nextSeq(tl.gen), tl.stack(skip))
+ tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoSyscallBegin, pp.trace.nextSeq(tl.gen), tl.stack(1))
}
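// Aside: a standalone, runnable illustration (not trace code) of the
// bookkeeping the deleted skip counts performed. runtime.Callers takes the
// same kind of skip argument: every wrapper frame between the capture site
// and the frame of interest must be counted by hand, which is exactly the
// brittle per-platform arithmetic that gp.syscallbp makes unnecessary.
package main

import (
	"fmt"
	"runtime"
)

func capture(skip int) []uintptr {
	pc := make([]uintptr, 8)
	return pc[:runtime.Callers(skip, pc)]
}

func trampoline(skip int) []uintptr { return capture(skip) }

func main() {
	for _, skip := range []int{2, 3} {
		frames := runtime.CallersFrames(trampoline(skip))
		f, _ := frames.Next()
		// skip=2 reports trampoline, skip=3 reports main: the right value
		// depends on how many wrappers happen to sit in between.
		fmt.Println("skip", skip, "->", f.Function)
	}
}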
// GoSysExit emits a GoSyscallEnd event, possibly along with a GoSyscallBlocked event
if getg() == gp {
nstk += fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf[1:])
} else if gp != nil {
- // Two cases:
+ // Three cases:
//
// (1) We're called on the g0 stack through mcall(fn) or systemstack(fn). To
// behave like gcallers above, we start unwinding from sched.bp, which
// points to the caller frame of the leaf frame of g's stack. The return
// address of the leaf frame is stored in sched.pc, which we manually
// capture here.
//
- // (2) We're called against a gp that we're not currently executing on, in
- // which case it's currently not executing. gp.sched contains the most up-to-date
+ // (2) We're called against a gp that we're not currently executing on, but that isn't
+ // in a syscall, in which case it's currently not executing. gp.sched contains the most
+ // up-to-date information about where it stopped, and like case (1), we match gcallers
+ // here.
+ //
+ // (3) We're called against a gp that we're not currently executing on, but that is in
+ // a syscall, in which case gp.syscallsp != 0. gp.syscall* contains the most up-to-date
// information about where it stopped, and like case (1), we match gcallers here.
- pcBuf[1] = gp.sched.pc
- nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.sched.bp), pcBuf[2:])
+ if gp.syscallsp != 0 {
+ pcBuf[1] = gp.syscallpc
+ nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.syscallbp), pcBuf[2:])
+ } else {
+ pcBuf[1] = gp.sched.pc
+ nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.sched.bp), pcBuf[2:])
+ }
}
}
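// For reference, the fpTracebackPCs loop that consumes these frame pointers
// is roughly the sketch below (illustrative, not the exact runtime source).
// It relies on the amd64/arm64 frame record layout: the word at the frame
// pointer is the caller's saved frame pointer, and the return address sits
// one word (goarch.PtrSize, from internal/goarch) above it.
func fpTracebackPCsSketch(fp unsafe.Pointer, pcBuf []uintptr) (i int) {
	for i = 0; i < len(pcBuf) && fp != nil; i++ {
		// The return address lives one word above the saved frame pointer.
		pcBuf[i] = *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
		// Follow the saved frame pointer to the caller's frame record.
		fp = unsafe.Pointer(*(*uintptr)(fp))
	}
	return i
}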
if nstk > 0 {