// Update it under gcsema to avoid gctrace getting wrong values.
work.userForced = trigger.kind == gcTriggerCycle
- if trace.enabled {
+ if traceEnabled() {
traceGCStart()
}
now := nanotime()
work.tSweepTerm = now
work.pauseStart = now
- if trace.enabled {
+ if traceEnabled() {
traceGCSTWStart(1)
}
systemstack(stopTheWorldWithSema)
// Concurrent mark.
systemstack(func() {
- now = startTheWorldWithSema(trace.enabled)
+ now = startTheWorldWithSema(traceEnabled())
work.pauseNS += now - work.pauseStart
work.tMark = now
memstats.gcPauseDist.record(now - work.pauseStart)
work.tMarkTerm = now
work.pauseStart = now
getg().m.preemptoff = "gcing"
- if trace.enabled {
+ if traceEnabled() {
traceGCSTWStart(0)
}
systemstack(stopTheWorldWithSema)
if restart {
getg().m.preemptoff = ""
systemstack(func() {
- now := startTheWorldWithSema(trace.enabled)
+ now := startTheWorldWithSema(traceEnabled())
work.pauseNS += now - work.pauseStart
memstats.gcPauseDist.record(now - work.pauseStart)
})
mp.traceback = 0
casgstatus(curgp, _Gwaiting, _Grunning)
- if trace.enabled {
+ if traceEnabled() {
traceGCDone()
}
throw("failed to set sweep barrier")
}
- systemstack(func() { startTheWorldWithSema(trace.enabled) })
+ systemstack(func() { startTheWorldWithSema(traceEnabled()) })
// Flush the heap profile so we can start a new cycle next GC.
// This is relatively expensive, so we don't do it with the
// world stopped.
// Mark gp ready to run.
func ready(gp *g, traceskip int, next bool) {
- if trace.enabled {
+ if traceEnabled() {
traceGoUnpark(gp, traceskip)
}
for _, pp := range allp {
s := pp.status
if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
- if trace.enabled {
+ if traceEnabled() {
traceGoSysBlock(pp)
traceProcStop(pp)
}
for _, p2 := range allp {
s := p2.status
if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
- if trace.enabled {
+ if traceEnabled() {
traceGoSysBlock(p2)
traceProcStop(p2)
}
if raceenabled {
gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
}
- if trace.enabled {
+ if traceEnabled() {
// Trigger two trace events for the locked g in the extra m,
// since the next event of the g will be traceEvGoSysExit in exitsyscall,
// while calling from C thread to Go.
return
}
// if there's trace work to do, start it straight away
- if (trace.enabled || trace.shutdown) && traceReaderAvailable() != nil {
+ if (traceEnabled() || trace.shutdown) && traceReaderAvailable() != nil {
startm(pp, false, false)
return
}
setThreadCPUProfiler(hz)
}
- if trace.enabled {
+ if traceEnabled() {
// GoSysExit has to happen when we have a P, but before GoStart.
// So we emit it here.
if gp.syscallsp != 0 && gp.sysblocktraced {
now, pollUntil, _ := checkTimers(pp, 0)
// Try to schedule the trace reader.
- if trace.enabled || trace.shutdown {
+ if traceEnabled() || trace.shutdown {
gp := traceReader()
if gp != nil {
casgstatus(gp, _Gwaiting, _Grunnable)
gp := list.pop()
injectglist(&list)
casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
+ if traceEnabled() {
traceGoUnpark(gp, 0)
}
return gp, false, false
pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
gp := node.gp.ptr()
casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
+ if traceEnabled() {
traceGoUnpark(gp, 0)
}
return gp, false, false
gp, otherReady := beforeIdle(now, pollUntil)
if gp != nil {
casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
+ if traceEnabled() {
traceGoUnpark(gp, 0)
}
return gp, false, false
// Run the idle worker.
pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
+ if traceEnabled() {
traceGoUnpark(gp, 0)
}
return gp, false, false
gp := list.pop()
injectglist(&list)
casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
+ if traceEnabled() {
traceGoUnpark(gp, 0)
}
return gp, false, false
if glist.empty() {
return
}
- if trace.enabled {
+ if traceEnabled() {
for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
traceGoUnpark(gp, 0)
}
func park_m(gp *g) {
mp := getg().m
- if trace.enabled {
+ if traceEnabled() {
traceGoPark(mp.waittraceev, mp.waittraceskip)
}
mp.waitunlockf = nil
mp.waitlock = nil
if !ok {
- if trace.enabled {
+ if traceEnabled() {
traceGoUnpark(gp, 2)
}
casgstatus(gp, _Gwaiting, _Grunnable)
// Gosched continuation on g0.
func gosched_m(gp *g) {
- if trace.enabled {
+ if traceEnabled() {
traceGoSched()
}
goschedImpl(gp)
gogo(&gp.sched) // never return
}
- if trace.enabled {
+ if traceEnabled() {
traceGoSched()
}
goschedImpl(gp)
}
func gopreempt_m(gp *g) {
- if trace.enabled {
+ if traceEnabled() {
traceGoPreempt()
}
goschedImpl(gp)
//
//go:systemstack
func preemptPark(gp *g) {
- if trace.enabled {
+ if traceEnabled() {
traceGoPark(traceEvGoBlock, 0)
}
status := readgstatus(gp)
}
func goyield_m(gp *g) {
- if trace.enabled {
+ if traceEnabled() {
traceGoPreempt()
}
pp := gp.m.p.ptr()
if raceenabled {
racegoend()
}
- if trace.enabled {
+ if traceEnabled() {
traceGoEnd()
}
mcall(goexit0)
})
}
- if trace.enabled {
+ if traceEnabled() {
systemstack(traceGoSysCall)
// systemstack itself clobbers g.sched.{pc,sp} and we might
// need them later when the G is genuinely blocked in a
lock(&sched.lock)
if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
- if trace.enabled {
+ if traceEnabled() {
traceGoSysBlock(pp)
traceProcStop(pp)
}
}
func entersyscallblock_handoff() {
- if trace.enabled {
+ if traceEnabled() {
traceGoSysCall()
traceGoSysBlock(getg().m.p.ptr())
}
tryRecordGoroutineProfileWB(gp)
})
}
- if trace.enabled {
+ if traceEnabled() {
if oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick {
systemstack(traceGoStart)
}
}
gp.sysexitticks = 0
- if trace.enabled {
+ if traceEnabled() {
// Wait till traceGoSysBlock event is emitted.
// This ensures consistency of the trace (the goroutine is started after it is blocked).
for oldp != nil && oldp.syscalltick == gp.m.syscalltick {
var ok bool
systemstack(func() {
ok = exitsyscallfast_pidle()
- if ok && trace.enabled {
+ if ok && traceEnabled() {
if oldp != nil {
// Wait till traceGoSysBlock event is emitted.
// This ensures consistency of the trace (the goroutine is started after it is blocked).
func exitsyscallfast_reacquired() {
gp := getg()
if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
- if trace.enabled {
+ if traceEnabled() {
// The p was retaken and then entered a syscall again (since gp.m.syscalltick has changed).
// traceGoSysBlock for this syscall was already emitted,
// but here we effectively retake the p from the new syscall running on the same p.
racereleasemergeg(newg, unsafe.Pointer(&labelSync))
}
}
- if trace.enabled {
+ if traceEnabled() {
traceGoCreate(newg, newg.startpc)
}
releasem(mp)
if old < 0 || nprocs <= 0 {
throw("procresize: invalid arg")
}
- if trace.enabled {
+ if traceEnabled() {
traceGomaxprocs(nprocs)
}
// because p.destroy itself has write barriers, so we
// need to do that from a valid P.
if gp.m.p != 0 {
- if trace.enabled {
+ if traceEnabled() {
// Pretend that we were descheduled
// and then scheduled again to keep
// the trace sane.
pp.m = 0
pp.status = _Pidle
acquirep(pp)
- if trace.enabled {
+ if traceEnabled() {
traceGoStart()
}
}
// from a potentially stale mcache.
pp.mcache.prepareForSweep()
- if trace.enabled {
+ if traceEnabled() {
traceProcStart()
}
}
print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
throw("releasep: invalid p state")
}
- if trace.enabled {
+ if traceEnabled() {
traceProcStop(gp.m.p.ptr())
}
gp.m.p = 0
// increment nmidle and report deadlock.
incidlelocked(-1)
if atomic.Cas(&pp.status, s, _Pidle) {
- if trace.enabled {
+ if traceEnabled() {
traceGoSysBlock(pp)
traceProcStop(pp)
}
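
For reference, the traceEnabled() accessor that every converted call site relies on is assumed to be a thin wrapper over the existing flag in the runtime's trace struct. A minimal sketch, assuming it lives in runtime/trace.go alongside that struct (the exact doc comment and nosplit pragma are assumptions, not confirmed by this patch):

// traceEnabled reports whether execution tracing is currently enabled.
// Sketch only: assumes the enabled flag remains a field of the package-level
// trace struct and is safe to read without holding trace.lock.
//
//go:nosplit
func traceEnabled() bool {
	return trace.enabled
}

Keeping the check behind a function rather than a raw field read lets later changes relocate or reinterpret the flag without touching the call sites shown above; note the patch deliberately leaves direct trace.shutdown accesses unchanged.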