// NOTE(review): this is a unified-diff-style excerpt, not compilable Go.
// Lines beginning "- " are removals, "+ " are additions, other lines are
// context. The fragments appear to come from the Go runtime scheduler's
// work-finding loop (findRunnable-like code) — TODO confirm against the
// actual runtime/proc.go revision. The recurring change replaces writes to
// a pre-declared variable ("gp = ...", "pp = ...") with scope-local short
// declarations ("gp := ...", "pp := ..."), tightening each lookup's scope.

// Hunk: when the execution tracer is enabled (or shutting down), prefer the
// trace reader goroutine so buffered trace data can be drained.
// Try to schedule the trace reader.
if trace.enabled || trace.shutdown {
- gp = traceReader()
+ gp := traceReader()
if gp != nil {
// The reader was parked in _Gwaiting; flip it to _Grunnable and emit the
// matching unpark trace event before (presumably) returning it — the
// return is elided from this excerpt.
casgstatus(gp, _Gwaiting, _Grunnable)
traceGoUnpark(gp, 0)
// Hunk: during the GC mark phase (gcBlackenEnabled != 0), ask the GC
// controller for a runnable mark-worker goroutine.
// Try to schedule a GC worker.
if gcBlackenEnabled != 0 {
// New form declares gp and a fresh timestamp tnow locally; the updated
// timestamp is written back to the outer `now` only on the fall-through
// path, since a non-nil gp returns immediately.
- gp, now = gcController.findRunnableGCWorker(pp, now)
+ gp, tnow := gcController.findRunnableGCWorker(pp, now)
if gp != nil {
return gp, false, true
}
+ now = tnow
}
// Hunk: every 61st schedtick, poll the global run queue under sched.lock so
// global goroutines aren't starved by P-local work. (The comment's first
// line about respawning is truncated by the excerpt.)
// Check the global runnable queue once in a while to ensure fairness.
// by constantly respawning each other.
if pp.schedtick%61 == 0 && sched.runqsize > 0 {
lock(&sched.lock)
- gp = globrunqget(pp, 1)
+ gp := globrunqget(pp, 1)
unlock(&sched.lock)
if gp != nil {
return gp, false, false
}
// Hunk: work stealing. The change moves the "now = tnow" write-back from
// before the result check to after it, so `now` is refreshed only on the
// path where stealing neither returned a goroutine nor jumped to top.
gp, inheritTime, tnow, w, newWork := stealWork(now)
- now = tnow
if gp != nil {
// Successfully stole.
return gp, inheritTime, false
// (elided branch — the "goto top" presumably belongs to a newWork
// re-check path; surrounding lines are missing from this excerpt)
// discover.
goto top
}
+
+ now = tnow
// Context: remember the earliest pending timer deadline so the eventual
// netpoll sleep can wake for it.
if w != 0 && (pollUntil == 0 || w < pollUntil) {
// Earlier timer to wait for.
pollUntil = w
// latency. See golang.org/issue/43997.
// Hunk: after deciding to go idle, re-check all run queues from the
// snapshots taken earlier (allpSnapshot/idlepMaskSnapshot) to close the
// race with newly readied goroutines.
// Check all runqueues once again.
- pp = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
+ pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
if pp != nil {
// Found work: reacquire a P and resume spinning.
acquirep(pp)
mp.spinning = true
}
// Hunk: same re-check, but for idle-priority GC background work.
// Check for idle-priority GC work again.
- pp, gp = checkIdleGCNoP()
+ pp, gp := checkIdleGCNoP()
if pp != nil {
acquirep(pp)
mp.spinning = true
// Restart the whole search with the newly acquired P.
goto top
}
// Hunk: take an idle P from sched's idle list (second return value,
// presumably a timestamp or status, is discarded).
lock(&sched.lock)
- pp, _ = pidleget(now)
+ pp, _ := pidleget(now)
unlock(&sched.lock)
if pp == nil {
// No P available: push the batch of runnable goroutines back onto the
// global queue instead.
injectglist(&list)
// Hunk (different area of the file — looks like the P gFree-list transfer,
// e.g. gfput's overflow path; TODO confirm): while the P-local free-g list
// holds 32 or more, pop goroutines and separate the stackless ones
// (stack.lo == 0) into noStackQ.
noStackQ gQueue
)
for pp.gFree.n >= 32 {
- gp = pp.gFree.pop()
+ gp := pp.gFree.pop()
pp.gFree.n--
if gp.stack.lo == 0 {
noStackQ.push(gp)