//
//go:nowritebarrierrec
func startpanic_m() bool {
- _g_ := getg()
+ gp := getg()
if mheap_.cachealloc.size == 0 { // very early
print("runtime: panic before malloc heap initialized\n")
}
// Disallow malloc during an unrecoverable panic. A panic
// could happen in a signal handler, or in a throw, or inside
// malloc itself. We want to catch if an allocation ever does
// happen (even if we're not in one of these situations).
- _g_.m.mallocing++
+ gp.m.mallocing++
// If we're dying because of a bad lock count, set it to a
// good lock count so we don't recursively panic below.
- if _g_.m.locks < 0 {
- _g_.m.locks = 1
+ if gp.m.locks < 0 {
+ gp.m.locks = 1
}
- switch _g_.m.dying {
+ switch gp.m.dying {
case 0:
// Setting dying >0 has the side-effect of disabling this G's writebuf.
- _g_.m.dying = 1
+ gp.m.dying = 1
atomic.Xadd(&panicking, 1)
lock(&paniclk)
if debug.schedtrace > 0 || debug.scheddetail > 0 {
schedtrace(true)
}
freezetheworld()
return true
case 1:
// Something failed while panicking.
// Just print a stack trace and exit.
- _g_.m.dying = 2
+ gp.m.dying = 2
print("panic during panic\n")
return false
case 2:
// This is a genuine bug in the runtime; we couldn't even
// print the stack trace successfully.
- _g_.m.dying = 3
+ gp.m.dying = 3
print("stack trace unavailable\n")
exit(4)
fallthrough
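
For context on the counter bumped in this hunk: mallocgc throws ("malloc deadlock") whenever m.mallocing is non-zero, so the increment turns any allocation attempted during an unrecoverable panic into an immediately visible failure rather than silent corruption. A minimal sketch of the same reentrancy-guard idiom, with hypothetical names (guard, enter, exit, allocate), not runtime code:

package main

import "fmt"

// guard mimics m.mallocing: a counter rather than a bool, so nested
// no-alloc sections compose. (Hypothetical sketch, not runtime code.)
type guard struct{ depth int }

func (g *guard) enter() { g.depth++ }
func (g *guard) exit()  { g.depth-- }

func (g *guard) allocate() {
	if g.depth > 0 {
		panic("allocate called inside a no-alloc section")
	}
	fmt.Println("allocation ok")
}

func main() {
	var g guard
	g.allocate() // fine
	g.enter()    // like gp.m.mallocing++ in startpanic_m
	defer g.exit()
	g.allocate() // caught: panics instead of corrupting state
}
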
// Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector.
func RaceDisable() {
- _g_ := getg()
- if _g_.raceignore == 0 {
- racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
+ gp := getg()
+ if gp.raceignore == 0 {
+ racecall(&__tsan_go_ignore_sync_begin, gp.racectx, 0, 0, 0)
}
- _g_.raceignore++
+ gp.raceignore++
}
// RaceEnable re-enables handling of race events in the current goroutine.
//
//go:nosplit
func RaceEnable() {
- _g_ := getg()
- _g_.raceignore--
- if _g_.raceignore == 0 {
- racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
+ gp := getg()
+ gp.raceignore--
+ if gp.raceignore == 0 {
+ racecall(&__tsan_go_ignore_sync_end, gp.racectx, 0, 0, 0)
}
}
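
Because raceignore is a nesting counter, Disable/Enable pairs compose, and only the outermost transitions reach TSan. A usage sketch; RaceDisable and RaceEnable are exported by package runtime only under the race build tag, so this runs via go run -race:

//go:build race

package main

import "runtime"

func main() {
	runtime.RaceDisable() // raceignore 0->1: __tsan_go_ignore_sync_begin fires
	runtime.RaceDisable() // raceignore 1->2: counter only, no TSan call
	// ... synchronization here is invisible to the race detector ...
	runtime.RaceEnable() // raceignore 2->1: still ignoring
	runtime.RaceEnable() // raceignore 1->0: __tsan_go_ignore_sync_end fires
}
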
//go:nosplit
func racegostart(pc uintptr) uintptr {
- _g_ := getg()
+ gp := getg()
var spawng *g
- if _g_.m.curg != nil {
- spawng = _g_.m.curg
+ if gp.m.curg != nil {
+ spawng = gp.m.curg
} else {
- spawng = _g_
+ spawng = gp
}
var racectx uintptr
//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
- _g_ := getg()
- if _g_ != _g_.m.curg {
+ gp := getg()
+ if gp != gp.m.curg {
// The call is coming from manual instrumentation of Go code running on g0/gsignal.
// Not interesting.
return
//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
- _g_ := getg()
- if _g_ != _g_.m.curg {
+ gp := getg()
+ if gp != gp.m.curg {
// The call is coming from manual instrumentation of Go code running on g0/gsignal.
// Not interesting.
return
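
The gp != gp.m.curg early-out filters calls arriving on g0 or gsignal stacks. Ordinary code reaches these range hooks through the exported wrappers; a hedged sketch of manual annotation, similar in spirit to what package syscall does when the kernel reads or writes a buffer on the program's behalf:

//go:build race

package main

import (
	"runtime"
	"unsafe"
)

func main() {
	buf := make([]byte, 64)
	// Tell the detector that something outside Go wrote, then read, buf.
	runtime.RaceWriteRange(unsafe.Pointer(&buf[0]), len(buf))
	runtime.RaceReadRange(unsafe.Pointer(&buf[0]), len(buf))
}
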
//go:linkname setPanicOnFault runtime/debug.setPanicOnFault
func setPanicOnFault(new bool) (old bool) {
- _g_ := getg()
- old = _g_.paniconfault
- _g_.paniconfault = new
+ gp := getg()
+ old = gp.paniconfault
+ gp.paniconfault = new
return old
}
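
Because the flag lives on the g, the setting is per-goroutine; the public entry point is runtime/debug.SetPanicOnFault. A save-and-restore sketch (withPanicOnFault is a hypothetical helper, not part of this change):

package main

import "runtime/debug"

// withPanicOnFault runs f with unexpected-fault-to-panic conversion enabled
// on the calling goroutine, then restores the previous setting.
func withPanicOnFault(f func()) {
	old := debug.SetPanicOnFault(true) // flips gp.paniconfault, returns old value
	defer debug.SetPanicOnFault(old)
	f()
}

func main() {
	withPanicOnFault(func() {
		// Faults at unexpected (non-nil) addresses, e.g. in mmap'd files,
		// now become recoverable panics instead of killing the process.
	})
}
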
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
- _g_ := getg()
+ gp := getg()
t := atomic.Load(&traceback_cache)
crash = t&tracebackCrash != 0
- all = _g_.m.throwing >= throwTypeUser || t&tracebackAll != 0
- if _g_.m.traceback != 0 {
- level = int32(_g_.m.traceback)
- } else if _g_.m.throwing >= throwTypeRuntime {
+ all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
+ if gp.m.traceback != 0 {
+ level = int32(gp.m.traceback)
+ } else if gp.m.throwing >= throwTypeRuntime {
// Always include runtime frames in runtime throws unless
// otherwise overridden by m.traceback.
level = 2
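
For reference, level is what GOTRACEBACK selects: none and single map to 0 and 1, system to 2 (runtime frames included), and crash additionally sets the crash bit read above. runtime/debug.SetTraceback can raise the level at run time, as in this sketch:

package main

import "runtime/debug"

func main() {
	// Equivalent to running under GOTRACEBACK=system: gotraceback()
	// now reports level 2, so tracebacks include runtime frames.
	debug.SetTraceback("system")
	panic("demo: note the extra runtime.* frames in the dump")
}
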
//go:nosplit
func acquirem() *m {
- _g_ := getg()
- _g_.m.locks++
- return _g_.m
+ gp := getg()
+ gp.m.locks++
+ return gp.m
}
//go:nosplit
func releasem(mp *m) {
- _g_ := getg()
+ gp := getg()
mp.locks--
- if mp.locks == 0 && _g_.preempt {
+ if mp.locks == 0 && gp.preempt {
// restore the preemption request in case we've cleared it in newstack
- _g_.stackguard0 = stackPreempt
+ gp.stackguard0 = stackPreempt
}
}
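
acquirem and releasem bracket regions that must not be preempted: any non-zero m.locks suppresses preemption, and the release path re-arms a request that arrived in the meantime by poisoning stackguard0. A self-contained analogue of that hand-off, using hypothetical types rather than runtime internals:

package main

import "fmt"

// machine stands in for the runtime's m; acquire/release mirror the
// acquirem/releasem discipline above. (Hypothetical sketch.)
type machine struct {
	locks   int
	preempt bool // a preemption request arrived while locks > 0
}

func acquire(m *machine) *machine {
	m.locks++ // non-zero locks means "do not preempt me"
	return m
}

func release(m *machine) {
	m.locks--
	if m.locks == 0 && m.preempt {
		// The runtime re-poisons stackguard0 here so the scheduler
		// delivers the deferred preemption at the next check.
		fmt.Println("delivering deferred preemption")
	}
}

func main() {
	mp := acquire(&machine{})
	mp.preempt = true // request lands while we are non-preemptible
	release(mp)       // count hits zero: the request is honored now
}
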
}
func traceGoStart() {
- _g_ := getg().m.curg
- pp := _g_.m.p
- _g_.traceseq++
+ gp := getg().m.curg
+ pp := gp.m.p
+ gp.traceseq++
if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
- traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[pp.ptr().gcMarkWorkerMode])
- } else if _g_.tracelastp == pp {
- traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
+ traceEvent(traceEvGoStartLabel, -1, uint64(gp.goid), gp.traceseq, trace.markWorkerLabels[pp.ptr().gcMarkWorkerMode])
+ } else if gp.tracelastp == pp {
+ traceEvent(traceEvGoStartLocal, -1, uint64(gp.goid))
} else {
- _g_.tracelastp = pp
- traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
+ gp.tracelastp = pp
+ traceEvent(traceEvGoStart, -1, uint64(gp.goid), gp.traceseq)
}
}
}
func traceGoSched() {
- _g_ := getg()
- _g_.tracelastp = _g_.m.p
+ gp := getg()
+ gp.tracelastp = gp.m.p
traceEvent(traceEvGoSched, 1)
}
func traceGoPreempt() {
- _g_ := getg()
- _g_.tracelastp = _g_.m.p
+ gp := getg()
+ gp.tracelastp = gp.m.p
traceEvent(traceEvGoPreempt, 1)
}
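
These hooks only fire while the execution tracer is active. A usage sketch that makes the path above emit traceEvGoSched (error handling kept minimal):

package main

import (
	"os"
	"runtime"
	"runtime/trace"
)

func main() {
	f, err := os.Create("trace.out")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := trace.Start(f); err != nil {
		panic(err)
	}
	runtime.Gosched() // yields the P: traceGoSched records traceEvGoSched
	trace.Stop()
	// Inspect with: go tool trace trace.out
}
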
// aka right now), and assign a fresh time stamp to keep the log consistent.
ts = 0
}
- _g_ := getg().m.curg
- _g_.traceseq++
- _g_.tracelastp = _g_.m.p
- traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
+ gp := getg().m.curg
+ gp.traceseq++
+ gp.tracelastp = gp.m.p
+ traceEvent(traceEvGoSysExit, -1, uint64(gp.goid), gp.traceseq, uint64(ts)/traceTickDiv)
}
func traceGoSysBlock(pp *p) {