package runtime
import (
+ "runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
print("hit ", gp.stkbarPos, " stack barriers, goid=", gp.goid, "\n")
}
+ gcLockStackBarriers(gp)
+
// Remove stack barriers that we didn't hit.
for _, stkbar := range gp.stkbar[gp.stkbarPos:] {
gcRemoveStackBarrier(gp, stkbar)
}

// Clear recorded barriers so copystack doesn't try to
// adjust them.
gp.stkbarPos = 0
gp.stkbar = gp.stkbar[:0]
+
+ gcUnlockStackBarriers(gp)
}
// gcRemoveStackBarrier removes a single stack barrier. It is the
//
//go:nosplit
func gcUnwindBarriers(gp *g, sp uintptr) {
+ gcLockStackBarriers(gp)
// On LR machines, if there is a stack barrier on the return
// from the frame containing sp, this will mark it as hit even
// though it isn't, but it's okay to be conservative.
before := gp.stkbarPos
for int(gp.stkbarPos) < len(gp.stkbar) && gp.stkbar[gp.stkbarPos].savedLRPtr < sp {
gcRemoveStackBarrier(gp, gp.stkbar[gp.stkbarPos])
gp.stkbarPos++
}
+ gcUnlockStackBarriers(gp)
if debugStackBarrier && gp.stkbarPos != before {
print("skip barriers below ", hex(sp), " in goid=", gp.goid, ": ")
gcPrintStkbars(gp.stkbar[before:gp.stkbarPos])
gp := getg()
gp.stkbar[gp.stkbarPos].savedLRVal = pc
}
+
+// gcLockStackBarriers synchronizes with tracebacks of gp's stack
+// during sigprof for installation or removal of stack barriers. It
+// blocks until any current sigprof is done tracebacking gp's stack
+// and then disallows profiling tracebacks of gp's stack.
+//
+// This is necessary because a sigprof during barrier installation or
+// removal could observe inconsistencies between the stkbar array and
+// the stack itself and crash.
+func gcLockStackBarriers(gp *g) {
+ for !atomic.Cas(&gp.stackLock, 0, 1) {
+ osyield()
+ }
+}
+
+// gcTryLockStackBarriers is the non-blocking variant of
+// gcLockStackBarriers. It reports whether the lock was acquired; on
+// success, the caller must release it with gcUnlockStackBarriers.
+func gcTryLockStackBarriers(gp *g) bool {
+ return atomic.Cas(&gp.stackLock, 0, 1)
+}
+
+// gcUnlockStackBarriers undoes gcLockStackBarriers or a successful
+// gcTryLockStackBarriers, re-allowing profiling tracebacks of gp's stack.
+func gcUnlockStackBarriers(gp *g) {
+ atomic.Store(&gp.stackLock, 0)
+}
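
The three helpers above form a simple spin lock keyed on gp.stackLock: writers (barrier installation and removal) block on a CAS loop, while sigprof only try-locks and skips the traceback when the lock is held. The sketch below is a minimal, self-contained illustration of the same pattern outside the runtime; it substitutes sync/atomic and runtime.Gosched for runtime/internal/atomic and osyield, and every name in it is illustrative rather than runtime API.

// Illustrative sketch only: the same lock/try-lock/unlock shape as the
// patch above, written against the standard library so it can run on
// its own. All names here are hypothetical, not the runtime's API.
package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

// stackLock plays the role of g.stackLock: 0 = unlocked, 1 = locked.
var stackLock uint32

// lockStackBarriers blocks until the lock is acquired, yielding the
// CPU between attempts (osyield in the runtime, Gosched here).
func lockStackBarriers() {
	for !atomic.CompareAndSwapUint32(&stackLock, 0, 1) {
		runtime.Gosched()
	}
}

// tryLockStackBarriers acquires the lock only if it is currently free
// and reports whether it succeeded.
func tryLockStackBarriers() bool {
	return atomic.CompareAndSwapUint32(&stackLock, 0, 1)
}

// unlockStackBarriers releases the lock with a plain atomic store.
func unlockStackBarriers() {
	atomic.StoreUint32(&stackLock, 0)
}

func main() {
	lockStackBarriers()
	fmt.Println("try-lock while held:", tryLockStackBarriers()) // false
	unlockStackBarriers()
	fmt.Println("try-lock while free:", tryLockStackBarriers()) // true
	unlockStackBarriers()
}
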
// the goroutine until we're done.
if castogscanstatus(gp, s, s|_Gscan) {
if !gp.gcscandone {
- // Coordinate with traceback
- // in sigprof.
- for !atomic.Cas(&gp.stackLock, 0, 1) {
- osyield()
- }
scanstack(gp)
- atomic.Store(&gp.stackLock, 0)
gp.gcscandone = true
}
restartg(gp)
// Profiling runs concurrently with GC, so it must not allocate.
mp.mallocing++
- // Coordinate with stack barrier insertion in scanstack.
- for !atomic.Cas(&gp.stackLock, 0, 1) {
- osyield()
- }
-
// Define that a "user g" is a user-created goroutine, and a "system g"
// is one that is m->g0 or m->gsignal.
//
// transition. We simply require that g and SP match and that the PC is not
// in gogo.
traceback := true
+ haveStackLock := false
if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) {
traceback = false
+ } else if gp.m.curg != nil {
+ if gcTryLockStackBarriers(gp.m.curg) {
+ haveStackLock = true
+ } else {
+ // Stack barriers are being inserted or
+ // removed, so we can't get a consistent
+ // traceback right now.
+ traceback = false
+ }
}
var stk [maxCPUProfStack]uintptr
n := 0
}
}
}
- atomic.Store(&gp.stackLock, 0)
+ if haveStackLock {
+ gcUnlockStackBarriers(gp.m.curg)
+ }
if prof.hz != 0 {
// Simple cas-lock to coordinate with setcpuprofilerate.
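
Taken together, the sigprof hunks reduce to a try-lock, traceback-if-acquired, unlock-if-held shape. The sketch below restates that flow using the hypothetical helpers from the previous illustration; it is a reading aid for the patch, not the runtime's sigprof.

// Reader-side discipline from sigprof, reduced to its shape. Uses the
// illustrative helpers defined in the earlier sketch.
func profileOnce(walkStack func()) {
	traceback := true
	haveStackLock := false
	if tryLockStackBarriers() {
		haveStackLock = true
	} else {
		// Barriers are being installed or removed; the stack and
		// the stkbar array may disagree, so skip the traceback.
		traceback = false
	}
	if traceback {
		walkStack()
	}
	if haveStackLock {
		unlockStackBarriers()
	}
}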