}
func dumpgstatus(gp *g) {
- _g_ := getg()
- print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
- print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
+ thisg := getg()
+ print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
+ print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
}
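dumpgstatus has to distinguish the g being inspected (gp) from the g executing the code (getg()), which is what the thisg rename makes explicit. The runtime internals are not callable from user code, but runtime.Stack with all=true reports roughly the same per-goroutine status; a small sketch for comparison:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	block := make(chan struct{})
	go func() { <-block }() // a goroutine parked in a chan receive, for illustration

	// runtime.Stack with all=true lists every goroutine together with its
	// wait state (running, chan receive, ...), roughly the per-g status
	// that dumpgstatus prints inside the runtime.
	buf := make([]byte, 1<<16)
	n := runtime.Stack(buf, true)
	fmt.Printf("%s\n", buf[:n])
	close(block)
}
```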
// Mark gp ready to run.
func ready(gp *g, traceskip int, next bool) {
status := readgstatus(gp)
// Mark runnable.
- _g_ := getg()
mp := acquirem() // disable preemption because it can be holding p in a local var
if status&^_Gscan != _Gwaiting {
dumpgstatus(gp)
throw("bad g->status in ready")
}
// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
casgstatus(gp, _Gwaiting, _Grunnable)
- runqput(_g_.m.p.ptr(), gp, next)
+ runqput(mp.p.ptr(), gp, next)
wakep()
releasem(mp)
}
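ready() is the generic wake-up path: the g leaves _Gwaiting, becomes _Grunnable, is queued on the current M's P via runqput, and wakep() may unpark another P/M to run it. A channel send that unblocks a receiver exercises exactly this transition; a minimal user-level demonstration (only the observable effect, since the runtime calls are not accessible):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ch := make(chan int)
	go func() {
		// This goroutine parks in _Gwaiting on the channel receive.
		v := <-ch
		fmt.Println("woke with", v)
	}()
	time.Sleep(10 * time.Millisecond) // let the receiver park
	ch <- 42                          // ready(): receiver becomes _Grunnable, runqput + wakep
	time.Sleep(10 * time.Millisecond) // let it run before main exits
}
```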
//
//go:nowritebarrierrec
func startlockedm(gp *g) {
- _g_ := getg()
-
mp := gp.lockedm.ptr()
- if mp == _g_.m {
+ if mp == getg().m {
throw("startlockedm: locked to me")
}
if mp.nextp != 0 {
throw("startlockedm: m has p")
}
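startlockedm hands the current P off to the M that gp is wired to via lockedm; that wiring is created by runtime.LockOSThread. A runnable sketch of the user-facing half (the goroutine below can only ever be run via its locked thread):

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	done := make(chan struct{})
	go func() {
		// Lock this goroutine to its OS thread. From now on the
		// scheduler must dispatch it through startlockedm/stoplockedm
		// on that specific M.
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		fmt.Println("running on a locked thread")
		close(done)
	}()
	<-done
}
```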
//
//go:yeswritebarrierrec
func execute(gp *g, inheritTime bool) {
- _g_ := getg()
+ mp := getg().m
if goroutineProfile.active {
// Make sure that gp has had its stack written out to the goroutine
// profile, exactly as it was when the goroutine profiler first
// stopped the world.
tryRecordGoroutineProfile(gp, osyield)
}
// Assign gp.m before entering _Grunning so running Gs have an
// M.
- _g_.m.curg = gp
- gp.m = _g_.m
+ mp.curg = gp
+ gp.m = mp
casgstatus(gp, _Grunnable, _Grunning)
gp.waitsince = 0
gp.preempt = false
gp.stackguard0 = gp.stack.lo + _StackGuard
if !inheritTime {
- _g_.m.p.ptr().schedtick++
+ mp.p.ptr().schedtick++
}
// Check whether the profiler needs to be turned on or off.
hz := sched.profilehz
- if _g_.m.profilehz != hz {
+ if mp.profilehz != hz {
setThreadCPUProfiler(hz)
}
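sched.profilehz is set through runtime.SetCPUProfileRate (normally by runtime/pprof), and each M that notices a mismatch in execute() retunes its own profiling timer with setThreadCPUProfiler. A sketch of the user-level switch that feeds this check:

```go
package main

import (
	"bytes"
	"fmt"
	"runtime/pprof"
)

func main() {
	var buf bytes.Buffer
	// StartCPUProfile sets the profiling rate (100 Hz); every M compares
	// its own profilehz against sched.profilehz in execute() and adjusts
	// its per-thread timer on the next mismatch.
	if err := pprof.StartCPUProfile(&buf); err != nil {
		panic(err)
	}
	for i := 0; i < 1e7; i++ {
		_ = i * i // burn a little CPU so the profile is not empty
	}
	pprof.StopCPUProfile()
	fmt.Println("profile bytes:", buf.Len())
}
```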
// tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
// reader) so the caller should try to wake a P.
func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
- _g_ := getg()
+ mp := getg().m
// The conditions here and in handoffp must agree: if
// findrunnable would return a G to run, handoffp must start
// an M.
top:
- pp := _g_.m.p.ptr()
+ pp := mp.p.ptr()
if sched.gcwaiting != 0 {
gcstopm()
goto top
// Limit the number of spinning Ms to half the number of busy Ps.
// This is necessary to prevent excessive CPU consumption when
// GOMAXPROCS>>1 but the program parallelism is low.
procs := uint32(gomaxprocs)
- if _g_.m.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
- if !_g_.m.spinning {
- _g_.m.spinning = true
+ if mp.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
+ if !mp.spinning {
+ mp.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
}
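The guard above caps spinning Ms at half the number of busy Ps: a new spinner is allowed only while 2*nmspinning < procs-npidle. A small sketch with assumed values (8 Ps, 2 of them idle) showing where the cap bites:

```go
package main

import "fmt"

func main() {
	const procs = uint32(8) // stand-in for GOMAXPROCS, assumed for illustration
	npidle := uint32(2)     // idle Ps
	for nmspinning := uint32(0); nmspinning <= 4; nmspinning++ {
		// Same test as findRunnable: a thread may start spinning only
		// while 2*nmspinning < procs-npidle, i.e. spinners stay below
		// half the busy Ps (here: at most 3 of 6).
		ok := 2*nmspinning < procs-npidle
		fmt.Printf("nmspinning=%d busy=%d -> may start spinning: %v\n",
			nmspinning, procs-npidle, ok)
	}
}
```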
// we also observe no idle Ps it is OK to skip unparking a new worker
// thread: the system is fully loaded so no spinning threads are required.
// Also see "Worker thread parking/unparking" comment at the top of the file.
- wasSpinning := _g_.m.spinning
- if _g_.m.spinning {
- _g_.m.spinning = false
+ wasSpinning := mp.spinning
+ if mp.spinning {
+ mp.spinning = false
if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
throw("findrunnable: negative nmspinning")
}
pp = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
if pp != nil {
acquirep(pp)
- _g_.m.spinning = true
+ mp.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
goto top
}
pp, gp = checkIdleGCNoP()
if pp != nil {
acquirep(pp)
- _g_.m.spinning = true
+ mp.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
// Run the idle worker.
// Poll network until next timer.
if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
atomic.Store64(&sched.pollUntil, uint64(pollUntil))
- if _g_.m.p != 0 {
+ if mp.p != 0 {
throw("findrunnable: netpoll with p")
}
- if _g_.m.spinning {
+ if mp.spinning {
throw("findrunnable: netpoll with spinning")
}
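Once this M wins the atomic.Xchg64 on sched.lastpoll it becomes the single blocking poller, sleeping in netpoll until I/O arrives or the earliest timer (pollUntil) fires. A simplified, hypothetical sketch of how an absolute pollUntil deadline becomes a netpoll delay (the real code inlines this in findRunnable):

```go
package main

import (
	"fmt"
	"time"
)

// pollDelay mirrors, in simplified form, how findRunnable turns pollUntil
// (an absolute nanosecond deadline, 0 meaning "no timer pending") into the
// argument for netpoll: -1 blocks indefinitely, otherwise wait only until
// the next timer would fire.
func pollDelay(pollUntil, now int64) int64 {
	if pollUntil == 0 {
		return -1 // no timers: block until I/O is ready
	}
	if d := pollUntil - now; d > 0 {
		return d
	}
	return 0 // deadline already passed: poll without blocking
}

func main() {
	now := time.Now().UnixNano()
	fmt.Println(pollDelay(0, now))                               // -1
	fmt.Println(pollDelay(now+int64(time.Millisecond), now) > 0) // true
}
```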
// Refresh now.
return gp, false, false
}
if wasSpinning {
- _g_.m.spinning = true
+ mp.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
}
goto top
// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.
func schedule() {
- _g_ := getg()
+ mp := getg().m
- if _g_.m.locks != 0 {
+ if mp.locks != 0 {
throw("schedule: holding locks")
}
- if _g_.m.lockedg != 0 {
+ if mp.lockedg != 0 {
stoplockedm()
- execute(_g_.m.lockedg.ptr(), false) // Never returns.
+ execute(mp.lockedg.ptr(), false) // Never returns.
}
// We should not schedule away from a g that is executing a cgo call,
// since the cgo call is using the m's g0 stack.
- if _g_.m.incgo {
+ if mp.incgo {
throw("schedule: in cgo")
}
top:
- pp := _g_.m.p.ptr()
+ pp := mp.p.ptr()
pp.preempt = false
// Safety check: if we are spinning, the run queue should be empty.
// Check this before calling checkTimers, as that might call
// goready to put a ready goroutine on the local run queue.
- if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
+ if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
throw("schedule: spinning with local work")
}
// This thread is going to run a goroutine and is not spinning anymore,
// so if it was marked as spinning we need to reset it now and potentially
// start a new spinning M.
- if _g_.m.spinning {
+ if mp.spinning {
resetspinning()
}
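resetspinning() drops this M's spinning state and may unpark another spinning M so work-stealing coverage is preserved. The scheduler loop itself can be re-entered voluntarily with runtime.Gosched, which queues the current g as runnable and calls schedule(); a minimal demonstration:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	runtime.GOMAXPROCS(1) // single P, so the yield visibly interleaves
	done := make(chan struct{})
	go func() {
		fmt.Println("other goroutine ran")
		close(done)
	}()
	// Gosched puts main back on a run queue and re-enters schedule(),
	// which picks up the other goroutine.
	runtime.Gosched()
	<-done
}
```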
// park continuation on g0.
func park_m(gp *g) {
- _g_ := getg()
+ mp := getg().m
if trace.enabled {
- traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
+ traceGoPark(mp.waittraceev, mp.waittraceskip)
}
casgstatus(gp, _Grunning, _Gwaiting)
dropg()
- if fn := _g_.m.waitunlockf; fn != nil {
- ok := fn(gp, _g_.m.waitlock)
- _g_.m.waitunlockf = nil
- _g_.m.waitlock = nil
+ if fn := mp.waitunlockf; fn != nil {
+ ok := fn(gp, mp.waitlock)
+ mp.waitunlockf = nil
+ mp.waitlock = nil
if !ok {
if trace.enabled {
traceGoUnpark(gp, 2)
}
casgstatus(gp, _Gwaiting, _Grunnable)
execute(gp, true) // Schedule it back, never returns.
}
}
schedule()
}
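The waitunlockf callback runs on g0 only after gp has been descheduled, so the lock protecting the wait condition is released strictly after the goroutine can no longer miss a wake-up. sync.Cond.Wait exposes the same unlock-then-park atomicity at the user level; a rough analogue:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)
	ready := false

	go func() {
		mu.Lock()
		ready = true
		mu.Unlock()
		cond.Signal() // like goready: marks the waiter runnable
	}()

	mu.Lock()
	for !ready {
		// Wait unlocks mu and parks atomically, mirroring how gopark
		// runs waitunlockf on g0 after the goroutine is off its M.
		cond.Wait()
	}
	mu.Unlock()
	fmt.Println("condition observed")
}
```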
// goexit continuation on g0.
func goexit0(gp *g) {
- _g_ := getg()
- pp := _g_.m.p.ptr()
+ mp := getg().m
+ pp := mp.p.ptr()
casgstatus(gp, _Grunning, _Gdead)
gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
gp.m = nil
locked := gp.lockedm != 0
gp.lockedm = 0
- _g_.m.lockedg = 0
+ mp.lockedg = 0
gp.preemptStop = false
gp.paniconfault = false
gp._defer = nil // should be true already but just in case.
dropg()
if GOARCH == "wasm" { // no threads yet on wasm
gfput(pp, gp)
schedule() // never returns
}
- if _g_.m.lockedInt != 0 {
- print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
+ if mp.lockedInt != 0 {
+ print("invalid m->lockedInt = ", mp.lockedInt, "\n")
throw("internal lockOSThread error")
}
gfput(pp, gp)
if locked {
// The goroutine may have locked this thread because
// it put it in an unusual kernel state. Kill it
// rather than returning it to the thread pool.
// Return to mstart, which will release the P and exit
// the thread.
if GOOS != "plan9" { // See golang.org/issue/22227.
- gogo(&_g_.m.g0.sched)
+ gogo(&mp.g0.sched)
} else {
// Clear lockedExt on plan9 since we may end up re-using
// this thread.
- _g_.m.lockedExt = 0
+ mp.lockedExt = 0
}
}
schedule()
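goexit0 detaches the dead g from its M, recycles it onto the P's free list with gfput, and re-enters schedule(). From user code the only visible effect is the goroutine count falling; for instance:

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
	"time"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() { defer wg.Done() }()
	}
	wg.Wait()                         // all ten have reached goexit0 by now...
	time.Sleep(10 * time.Millisecond) // ...give the scheduler a beat to finish teardown
	// Their g structs sit on free lists (gfput) awaiting reuse by newproc1.
	fmt.Println("goroutines now:", runtime.NumGoroutine())
}
```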
// Create a new g in state _Grunnable, starting at fn. callerpc is the
// address of the go statement that created this. The caller is responsible
// for adding the new g to the scheduler.
func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
- _g_ := getg()
-
if fn == nil {
fatal("go of nil func value")
}
- acquirem() // disable preemption because it can be holding p in a local var
- pp := _g_.m.p.ptr()
+ mp := acquirem() // disable preemption because we hold M and P in local vars.
+ pp := mp.p.ptr()
newg := gfget(pp)
if newg == nil {
newg = malg(_StackMin)
}
if isSystemGoroutine(newg, false) {
atomic.Xadd(&sched.ngsys, +1)
} else {
// Only user goroutines inherit pprof labels.
- if _g_.m.curg != nil {
- newg.labels = _g_.m.curg.labels
+ if mp.curg != nil {
+ newg.labels = mp.curg.labels
}
if goroutineProfile.active {
// A concurrent goroutine profile is running. It should include
if trace.enabled {
traceGoCreate(newg, newg.startpc)
}
- releasem(_g_.m)
+ releasem(mp)
return newg
}
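The newg.labels copy above is the mechanism behind pprof label inheritance: a goroutine spawned while its parent carries a label set gets a copy at creation. Observable via runtime/pprof (here the label is read back through the context that pprof.Do threads through; the goroutine-level copy is what CPU profiles report):

```go
package main

import (
	"context"
	"fmt"
	"runtime/pprof"
)

func main() {
	done := make(chan struct{})
	pprof.Do(context.Background(), pprof.Labels("job", "demo"), func(ctx context.Context) {
		go func() {
			// newproc1 copied the parent's label set into this goroutine
			// (newg.labels = mp.curg.labels); the ctx carries the same
			// labels, which ForLabels lets us enumerate.
			pprof.ForLabels(ctx, func(key, value string) bool {
				fmt.Println(key, "=", value)
				return true
			})
			close(done)
		}()
	})
	<-done
}
```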