// SpanExists records an event indicating that the span exists.
func (tl traceLocker) SpanExists(s *mspan) {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpan, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSpan, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
}
// SpanAlloc records an event indicating that the span has just been allocated.
func (tl traceLocker) SpanAlloc(s *mspan) {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpanAlloc, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSpanAlloc, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
}
// SpanFree records an event indicating that the span is about to be freed.
func (tl traceLocker) SpanFree(s *mspan) {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpanFree, traceSpanID(s))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSpanFree, traceSpanID(s))
}
// traceSpanID creates a trace ID for the span s for the trace.
// HeapObjectExists records that an object already exists at addr with the provided type.
// The type is optional, and the size of the slot occupied by the object is inferred from the
// span containing it.
func (tl traceLocker) HeapObjectExists(addr uintptr, typ *abi.Type) {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapObject, traceHeapObjectID(addr), tl.rtype(typ))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapObject, traceHeapObjectID(addr), tl.rtype(typ))
}
// HeapObjectAlloc records that an object was newly allocated at addr with the provided type.
// The type is optional, and the size of the slot occupied by the object is inferred from the
// span containing it.
func (tl traceLocker) HeapObjectAlloc(addr uintptr, typ *abi.Type) {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapObjectAlloc, traceHeapObjectID(addr), tl.rtype(typ))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapObjectAlloc, traceHeapObjectID(addr), tl.rtype(typ))
}
// HeapObjectFree records that an object at addr is about to be freed.
func (tl traceLocker) HeapObjectFree(addr uintptr) {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapObjectFree, traceHeapObjectID(addr))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapObjectFree, traceHeapObjectID(addr))
}
// traceHeapObjectID creates a trace ID for a heap object at address addr.
// GoroutineStackExists records that a goroutine stack already exists at address base with the provided size.
func (tl traceLocker) GoroutineStackExists(base, size uintptr) {
order := traceCompressStackSize(size)
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoroutineStack, traceGoroutineStackID(base), order)
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoroutineStack, traceGoroutineStackID(base), order)
}
// GoroutineStackAlloc records that a goroutine stack was newly allocated at address base with the provided size.
func (tl traceLocker) GoroutineStackAlloc(base, size uintptr) {
order := traceCompressStackSize(size)
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoroutineStackAlloc, traceGoroutineStackID(base), order)
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoroutineStackAlloc, traceGoroutineStackID(base), order)
}
// GoroutineStackFree records that a goroutine stack at address base is about to be freed.
func (tl traceLocker) GoroutineStackFree(base uintptr) {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoroutineStackFree, traceGoroutineStackID(base))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoroutineStackFree, traceGoroutineStackID(base))
}
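traceCompressStackSize itself isn't shown in this diff, but since goroutine stacks come in power-of-two sizes, the size can be reduced to its base-2 exponent and written as a single small varint. A minimal standalone sketch of that idea, with a hypothetical compressStackSize standing in for the runtime helper:

package main

import (
	"fmt"
	"math/bits"
)

// compressStackSize returns log2(size), assuming size is a power of two,
// so an 8 KiB stack encodes as the single varint value 13.
// Hypothetical stand-in for the runtime's traceCompressStackSize.
func compressStackSize(size uint64) uint64 {
	if size == 0 || size&(size-1) != 0 {
		panic("stack size is not a power of 2")
	}
	return uint64(bits.TrailingZeros64(size))
}

func main() {
	fmt.Println(compressStackSize(8192)) // 13
}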
// traceGoroutineStackID creates a trace ID for the goroutine stack from its base address.
type traceWriter struct {
	traceLocker
	*traceBuf
}
-// write returns an a traceWriter that writes into the current M's stream.
+// writer returns a traceWriter that writes into the current M's stream.
+//
+// Once this is called, the caller must guard against stack growth until
+// end is called on it. Therefore, it's highly recommended to use this
+// API in a "fluent" style, for example tl.writer().event(...).end().
+// Better yet, callers just looking to write events should use eventWriter
+// when possible, which is a much safer wrapper around this function.
+//
+// nosplit to allow for safe reentrant tracing from stack growth paths.
+//
+//go:nosplit
func (tl traceLocker) writer() traceWriter {
+ if debugTraceReentrancy {
+ // Checks that the invariants of this function are being upheld.
+ gp := getg()
+ if gp == gp.m.curg {
+ tl.mp.trace.oldthrowsplit = gp.throwsplit
+ gp.throwsplit = true
+ }
+ }
return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2]}
}
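The fluent style recommended above works because traceWriter is passed and returned by value: each call hands back the current writer, so a buffer swap mid-chain is never lost, and nothing runs between writer() and end() that could grow the stack. A toy sketch of the value-threading pattern (the types here are illustrative, not the runtime's):

package main

import "fmt"

// writer is a toy value-type writer. Like traceWriter, it threads itself
// through each call, so an append that swaps the underlying buffer is
// never lost by the caller.
type writer struct{ buf []byte }

func (w writer) event(b byte) writer {
	w.buf = append(w.buf, b) // may reallocate; returning w keeps the update
	return w
}

func (w writer) end() {
	fmt.Printf("wrote %d bytes\n", len(w.buf))
}

func main() {
	// Mirrors the recommended tl.writer().event(...).end() chain.
	writer{}.event(1).event(2).end() // wrote 2 bytes
}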
// - Another traceLocker is held.
// - trace.gen is prevented from advancing.
//
+// This does not have the same stack growth restrictions as traceLocker.writer.
+//
// buf may be nil.
func unsafeTraceWriter(gen uintptr, buf *traceBuf) traceWriter {
return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}
}
+// event writes out the bytes of an event into the event stream.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
+func (w traceWriter) event(ev traceEv, args ...traceArg) traceWriter {
+ // N.B. Everything in this call must be nosplit to maintain
+ // the stack growth related invariants for writing events.
+
+ // Make sure we have room.
+ w, _ = w.ensure(1 + (len(args)+1)*traceBytesPerNumber)
+
+ // Compute the timestamp diff that we'll put in the trace.
+ ts := traceClockNow()
+ if ts <= w.traceBuf.lastTime {
+ ts = w.traceBuf.lastTime + 1
+ }
+ tsDiff := uint64(ts - w.traceBuf.lastTime)
+ w.traceBuf.lastTime = ts
+
+ // Write out event.
+ w.byte(byte(ev))
+ w.varint(tsDiff)
+ for _, arg := range args {
+ w.varint(uint64(arg))
+ }
+ return w
+}
+
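The body of event clamps the timestamp to lastTime+1 whenever the clock fails to advance, so timestamps within a buffer stay strictly monotonic, and it stores only the delta, which usually varint-encodes to a byte or two. A self-contained sketch of that delta scheme (names are illustrative):

package main

import "fmt"

// tsEncoder mirrors the scheme above: remember the last emitted time,
// force strict monotonicity, and record only the difference.
type tsEncoder struct{ lastTime uint64 }

func (e *tsEncoder) delta(now uint64) uint64 {
	if now <= e.lastTime {
		now = e.lastTime + 1 // clock didn't advance; nudge forward
	}
	d := now - e.lastTime
	e.lastTime = now
	return d
}

func main() {
	var e tsEncoder
	// Two events at the same clock reading still get distinct timestamps.
	fmt.Println(e.delta(100), e.delta(100), e.delta(107)) // 100 1 6
}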
// end writes the buffer back into the m.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (w traceWriter) end() {
if w.mp == nil {
// Tolerate a nil mp. It makes code that creates traceWriters directly
// slightly simpler.
return
}
w.mp.trace.buf[w.gen%2] = w.traceBuf
+ if debugTraceReentrancy {
+ // The writer is no longer live, we can drop throwsplit (if it wasn't
+ // already set upon entry).
+ gp := getg()
+ if gp == gp.m.curg {
+ gp.throwsplit = w.mp.trace.oldthrowsplit
+ }
+ }
}
// ensure makes sure that at least maxSize bytes are available to write.
//
// Returns whether the buffer was flushed.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
refill := w.traceBuf == nil || !w.available(maxSize)
if refill {
	w = w.refill(traceNoExperiment)
}
return w, refill
}
// flush puts w.traceBuf on the queue of full buffers.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (w traceWriter) flush() traceWriter {
systemstack(func() {
lock(&trace.lock)
// refill puts w.traceBuf on the queue of full buffers and refreshes w's buffer.
//
// exp indicates whether the refilled batch should be EvExperimentalBatch.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (w traceWriter) refill(exp traceExperiment) traceWriter {
systemstack(func() {
lock(&trace.lock)
}
// byte appends v to buf.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (buf *traceBuf) byte(v byte) {
buf.arr[buf.pos] = v
buf.pos++
}
// varint appends v to buf in little-endian-base-128 encoding.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (buf *traceBuf) varint(v uint64) {
pos := buf.pos
arr := buf.arr[pos : pos+traceBytesPerNumber]
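For reference, little-endian-base-128 encoding emits seven payload bits per byte, low-order group first, with the high bit marking that more bytes follow. The sketch below shows both the streaming form and a fixed-width reserve-then-fill form like varintAt's (a standalone illustration; the runtime's fixed width is traceBytesPerNumber):

package main

import "fmt"

// varint appends v in LEB128: 7 bits per byte, high bit = "more follows".
func varint(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, byte(v)|0x80)
	}
	return append(buf, byte(v))
}

// varintAt writes v at pos using a fixed width of n bytes, setting the
// continuation bit on all but the last byte so readers decode it as a
// single varint even when the value is small.
func varintAt(buf []byte, pos, n int, v uint64) {
	for i := 0; i < n; i++ {
		b := byte(v & 0x7f)
		v >>= 7
		if i < n-1 {
			b |= 0x80
		}
		buf[pos+i] = b
	}
}

func main() {
	fmt.Printf("% x\n", varint(nil, 300)) // ac 02
	fixed := make([]byte, 3)
	varintAt(fixed, 0, 3, 300)
	fmt.Printf("% x\n", fixed) // ac 82 00, same value in fixed width
}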
// varintReserve reserves enough space in buf to hold any varint.
//
// Space reserved this way can be filled in with the varintAt method.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (buf *traceBuf) varintReserve() int {
p := buf.pos
buf.pos += traceBytesPerNumber
return p
}
// stringData appends s's data directly to buf.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (buf *traceBuf) stringData(s string) {
buf.pos += copy(buf.arr[buf.pos:], s)
}
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (buf *traceBuf) available(size int) bool {
return len(buf.arr)-buf.pos >= size
}
// varintAt writes varint v at byte position pos in buf. This always
// consumes traceBytesPerNumber bytes. This is intended for when the caller
// needs to reserve space for a varint but can't populate it until later.
// Use varintReserve to reserve this space.
+//
+// nosplit because it's part of writing an event for an M, which must not
+// have any stack growth.
+//
+//go:nosplit
func (buf *traceBuf) varintAt(pos int, v uint64) {
for i := 0; i < traceBytesPerNumber; i++ {
if i < traceBytesPerNumber-1 {
// mTraceState is per-M state for the tracer.
type mTraceState struct {
- seqlock atomic.Uintptr // seqlock indicating that this M is writing to a trace buffer.
- buf [2]*traceBuf // Per-M traceBuf for writing. Indexed by trace.gen%2.
- link *m // Snapshot of alllink or freelink.
+ seqlock atomic.Uintptr // seqlock indicating that this M is writing to a trace buffer.
+ buf [2]*traceBuf // Per-M traceBuf for writing. Indexed by trace.gen%2.
+ link *m // Snapshot of alllink or freelink.
+ reentered uint32 // Whether we've reentered tracing from within tracing.
+ oldthrowsplit bool // gp.throwsplit upon calling traceLocker.writer. For debugging.
}
// pTraceState is per-P state for the tracer.
return traceAcquireEnabled()
}
-// traceTryAcquire is like traceAcquire, but may return an invalid traceLocker even
-// if tracing is enabled. For example, it will return !ok if traceAcquire is being
-// called with an active traceAcquire on the M (reentrant locking). This exists for
-// optimistically emitting events in the few contexts where tracing is now allowed.
-//
-// nosplit for alignment with traceTryAcquire, so it can be used in the
-// same contexts.
-//
-//go:nosplit
-func traceTryAcquire() traceLocker {
- if !traceEnabled() {
- return traceLocker{}
- }
- return traceTryAcquireEnabled()
-}
-
// traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly
// broken out to make traceAcquire inlineable to keep the overhead of the tracer
// when it's disabled low.
// Prevent preemption.
mp := acquirem()
+ // Check if we're already tracing. It's safe to be reentrant in general,
+ // because this function (and the invariants of traceLocker.writer) ensure
+ // that it is.
+ if mp.trace.seqlock.Load()%2 == 1 {
+ mp.trace.reentered++
+ return traceLocker{mp, trace.gen.Load()}
+ }
+
// Acquire the trace seqlock. This prevents traceAdvance from moving forward
// until all Ms are observed to be outside of their seqlock critical section.
//
// doing.
seq := mp.trace.seqlock.Add(1)
if debugTraceReentrancy && seq%2 != 1 {
- throw("bad use of trace.seqlock or tracer is reentrant")
+ throw("bad use of trace.seqlock")
}
// N.B. This load of gen appears redundant with the one in traceEnabled.
return traceLocker{mp, gen}
}
-// traceTryAcquireEnabled is like traceAcquireEnabled but may return an invalid
-// traceLocker under some conditions. See traceTryAcquire for more details.
-//
-// nosplit for alignment with traceAcquireEnabled, so it can be used in the
-// same contexts.
-//
-//go:nosplit
-func traceTryAcquireEnabled() traceLocker {
- // Any time we acquire a traceLocker, we may flush a trace buffer. But
- // buffer flushes are rare. Record the lock edge even if it doesn't happen
- // this time.
- lockRankMayTraceFlush()
-
- // Check if we're already locked. If so, return an invalid traceLocker.
- if getg().m.trace.seqlock.Load()%2 == 1 {
- return traceLocker{}
- }
- return traceAcquireEnabled()
-}
-
// ok returns true if the traceLocker is valid (i.e. tracing is enabled).
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func (tl traceLocker) ok() bool {
	return tl.gen != 0
}
// traceRelease indicates that this M is done writing trace events.
//
// nosplit because it must be called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceRelease(tl traceLocker) {
- seq := tl.mp.trace.seqlock.Add(1)
- if debugTraceReentrancy && seq%2 != 0 {
- print("runtime: seq=", seq, "\n")
- throw("bad use of trace.seqlock")
+ if tl.mp.trace.reentered > 0 {
+ tl.mp.trace.reentered--
+ } else {
+ seq := tl.mp.trace.seqlock.Add(1)
+ if debugTraceReentrancy && seq%2 != 0 {
+ print("runtime: seq=", seq, "\n")
+ throw("bad use of trace.seqlock")
+ }
}
releasem(tl.mp)
}
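Read together, traceAcquireEnabled and traceRelease form a reentrant seqlock: the outermost acquire flips the per-M sequence odd, nested acquires only bump the reentered counter, and only the outermost release flips the sequence even again, at which point traceAdvance may observe the M as quiescent. A standalone sketch of the protocol (single goroutine for clarity; the runtime additionally disables preemption via acquirem):

package main

import (
	"fmt"
	"sync/atomic"
)

type reentrantSeqlock struct {
	seq       atomic.Uintptr
	reentered uint32 // nesting depth beyond the outermost acquire
}

func (l *reentrantSeqlock) acquire() {
	if l.seq.Load()%2 == 1 {
		l.reentered++ // already inside: reentrant acquire is free
		return
	}
	l.seq.Add(1) // odd: writer active; observers must wait
}

func (l *reentrantSeqlock) release() {
	if l.reentered > 0 {
		l.reentered--
		return
	}
	l.seq.Add(1) // even: outermost writer done
}

func main() {
	var l reentrantSeqlock
	l.acquire()
	l.acquire() // e.g. tracing triggered from within tracing
	l.release()
	l.release()
	fmt.Println(l.seq.Load()%2 == 0) // true: fully released
}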
// Gomaxprocs emits a ProcsChange event.
func (tl traceLocker) Gomaxprocs(procs int32) {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvProcsChange, traceArg(procs), tl.stack(1))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvProcsChange, traceArg(procs), tl.stack(1))
}
// ProcStart traces a ProcStart event.
// Procs are typically started within the scheduler when there is no user goroutine. If there is a user goroutine,
// it must be in _Gsyscall because the only time a goroutine is allowed to have its Proc moved around from under it
// is during a syscall.
- tl.eventWriter(traceGoSyscall, traceProcIdle).commit(traceEvProcStart, traceArg(pp.id), pp.trace.nextSeq(tl.gen))
+ tl.eventWriter(traceGoSyscall, traceProcIdle).event(traceEvProcStart, traceArg(pp.id), pp.trace.nextSeq(tl.gen))
}
// ProcStop traces a ProcStop event.
func (tl traceLocker) ProcStop(pp *p) {
// The only time a goroutine is allowed to have its Proc moved around
// from under it is during a syscall.
- tl.eventWriter(traceGoSyscall, traceProcRunning).commit(traceEvProcStop)
+ tl.eventWriter(traceGoSyscall, traceProcRunning).event(traceEvProcStop)
}
// GCActive traces a GCActive event.
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCActive() {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCActive, traceArg(trace.seqGC))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGCActive, traceArg(trace.seqGC))
// N.B. Only one GC can be running at a time, so this is naturally
// serialized by the caller.
trace.seqGC++
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCStart() {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCBegin, traceArg(trace.seqGC), tl.stack(3))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGCBegin, traceArg(trace.seqGC), tl.stack(3))
// N.B. Only one GC can be running at a time, so this is naturally
// serialized by the caller.
trace.seqGC++
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCDone() {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCEnd, traceArg(trace.seqGC))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGCEnd, traceArg(trace.seqGC))
// N.B. Only one GC can be running at a time, so this is naturally
// serialized by the caller.
trace.seqGC++
func (tl traceLocker) STWStart(reason stwReason) {
// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWBegin, tl.string(reason.String()), tl.stack(2))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSTWBegin, tl.string(reason.String()), tl.stack(2))
}
// STWDone traces a STWEnd event.
func (tl traceLocker) STWDone() {
// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWEnd)
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSTWEnd)
}
// GCSweepStart prepares to trace a sweep loop. This does not
pp := tl.mp.p.ptr()
if pp.trace.maySweep {
if pp.trace.swept == 0 {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepBegin, tl.stack(1))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGCSweepBegin, tl.stack(1))
pp.trace.inSweep = true
}
pp.trace.swept += bytesSwept
throw("missing traceGCSweepStart")
}
if pp.trace.inSweep {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepEnd, traceArg(pp.trace.swept), traceArg(pp.trace.reclaimed))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGCSweepEnd, traceArg(pp.trace.swept), traceArg(pp.trace.reclaimed))
pp.trace.inSweep = false
}
pp.trace.maySweep = false
// GCMarkAssistStart emits a MarkAssistBegin event.
func (tl traceLocker) GCMarkAssistStart() {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistBegin, tl.stack(1))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGCMarkAssistBegin, tl.stack(1))
}
// GCMarkAssistDone emits a MarkAssistEnd event.
func (tl traceLocker) GCMarkAssistDone() {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistEnd)
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGCMarkAssistEnd)
}
// GoCreate emits a GoCreate event.
if blocked {
ev = traceEvGoCreateBlocked
}
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(newg.goid), tl.startPC(pc), tl.stack(2))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(ev, traceArg(newg.goid), tl.startPC(pc), tl.stack(2))
}
// GoStart emits a GoStart event.
gp := getg().m.curg
pp := gp.m.p
w := tl.eventWriter(traceGoRunnable, traceProcRunning)
- w = w.write(traceEvGoStart, traceArg(gp.goid), gp.trace.nextSeq(tl.gen))
+ w.event(traceEvGoStart, traceArg(gp.goid), gp.trace.nextSeq(tl.gen))
if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
- w = w.write(traceEvGoLabel, trace.markWorkerLabels[tl.gen%2][pp.ptr().gcMarkWorkerMode])
+ w.event(traceEvGoLabel, trace.markWorkerLabels[tl.gen%2][pp.ptr().gcMarkWorkerMode])
}
- w.end()
}
// GoEnd emits a GoDestroy event.
//
// TODO(mknyszek): Rename this to GoDestroy.
func (tl traceLocker) GoEnd() {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoDestroy)
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoDestroy)
}
// GoSched emits a GoStop event with a GoSched reason.
// GoStop emits a GoStop event with the provided reason.
func (tl traceLocker) GoStop(reason traceGoStopReason) {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(1))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(1))
}
// GoPark emits a GoBlock event with the provided reason.
// TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
// that we have both, and waitReason is way more descriptive.
func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
}
// GoUnpark emits a GoUnblock event.
func (tl traceLocker) GoUnpark(gp *g, skip int) {
// Emit a GoWaiting status if necessary for the unblocked goroutine.
- w := tl.eventWriter(traceGoRunning, traceProcRunning)
- // Careful: don't use the event writer. We never want status or in-progress events
- // to trigger more in-progress events.
- w.w = emitUnblockStatus(w.w, gp, tl.gen)
- w.commit(traceEvGoUnblock, traceArg(gp.goid), gp.trace.nextSeq(tl.gen), tl.stack(skip))
+ tl.emitUnblockStatus(gp, tl.gen)
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoUnblock, traceArg(gp.goid), gp.trace.nextSeq(tl.gen), tl.stack(skip))
}
// GoSwitch emits a GoSwitch event. If destroy is true, the calling goroutine
// is simultaneously being destroyed.
func (tl traceLocker) GoSwitch(nextg *g, destroy bool) {
// Emit a GoWaiting status if necessary for the unblocked goroutine.
+ tl.emitUnblockStatus(nextg, tl.gen)
w := tl.eventWriter(traceGoRunning, traceProcRunning)
- // Careful: don't use the event writer. We never want status or in-progress events
- // to trigger more in-progress events.
- w.w = emitUnblockStatus(w.w, nextg, tl.gen)
ev := traceEvGoSwitch
if destroy {
ev = traceEvGoSwitchDestroy
}
- w.commit(ev, traceArg(nextg.goid), nextg.trace.nextSeq(tl.gen))
+ w.event(ev, traceArg(nextg.goid), nextg.trace.nextSeq(tl.gen))
}
// emitUnblockStatus emits a GoStatus GoWaiting event for a goroutine about to be
// unblocked to the trace writer.
-func emitUnblockStatus(w traceWriter, gp *g, gen uintptr) traceWriter {
+func (tl traceLocker) emitUnblockStatus(gp *g, gen uintptr) {
if !gp.trace.statusWasTraced(gen) && gp.trace.acquireStatus(gen) {
// TODO(go.dev/issue/65634): Although it would be nice to add a stack trace here of gp,
// we cannot safely do so. gp is in _Gwaiting and so we don't have ownership of its stack.
// We can fix this by acquiring the goroutine's scan bit.
- w = w.writeGoStatus(gp.goid, -1, traceGoWaiting, gp.inMarkAssist, 0)
+ tl.writer().writeGoStatus(gp.goid, -1, traceGoWaiting, gp.inMarkAssist, 0).end()
}
- return w
}
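The statusWasTraced/acquireStatus pair used here suggests a once-per-generation claim: the first caller to claim the flag for the current generation emits the status event and everyone else skips it. A minimal sketch of that idiom under that assumption (the runtime's actual bookkeeping may differ):

package main

import (
	"fmt"
	"sync/atomic"
)

// statusGen records the last trace generation for which a status event
// was emitted; zero means "never". Hypothetical stand-in for the
// per-goroutine status-tracking state.
type statusGen struct{ gen atomic.Uint64 }

// acquire reports whether the caller won the right to emit the status
// for generation g; at most one caller wins per generation.
func (s *statusGen) acquire(g uint64) bool {
	old := s.gen.Load()
	return old != g && s.gen.CompareAndSwap(old, g)
}

func main() {
	var s statusGen
	fmt.Println(s.acquire(1), s.acquire(1)) // true false: emitted once
	fmt.Println(s.acquire(2))               // true: new generation, emit again
}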
// GoSysCall emits a GoSyscallBegin event.
// Scribble down the M that the P is currently attached to.
pp := tl.mp.p.ptr()
pp.trace.mSyscallID = int64(tl.mp.procid)
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoSyscallBegin, pp.trace.nextSeq(tl.gen), tl.stack(1))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoSyscallBegin, pp.trace.nextSeq(tl.gen), tl.stack(1))
}
// GoSysExit emits a GoSyscallEnd event, possibly along with a GoSyscallBlocked event
} else {
tl.mp.p.ptr().trace.mSyscallID = -1
}
- tl.eventWriter(traceGoSyscall, procStatus).commit(ev)
+ tl.eventWriter(traceGoSyscall, procStatus).event(ev)
}
// ProcSteal indicates that our current M stole a P from another M.
mStolenFrom := pp.trace.mSyscallID
pp.trace.mSyscallID = -1
+ // Emit the status of the P we're stealing. We may be just about to do this when creating the event
+ // writer but it's not guaranteed, even if inSyscall is true. Although it might seem like from a
+ // syscall context we're always stealing a P for ourselves, we may have not wired it up yet (so
+ // it wouldn't be visible to eventWriter) or we may not even intend to wire it up to ourselves
+ // at all (e.g. entersyscall_gcwait).
+ if !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
+ // Careful: don't use the event writer. We never want status or in-progress events
+ // to trigger more in-progress events.
+ tl.writer().writeProcStatus(uint64(pp.id), traceProcSyscallAbandoned, pp.trace.inSweep).end()
+ }
+
// The status of the proc and goroutine, if we need to emit one here, is not evident from the
// context of just emitting this event alone. There are two cases. Either we're trying to steal
// the P just to get its attention (e.g. STW or sysmon retake) or we're trying to steal a P for
goStatus = traceGoSyscall
procStatus = traceProcSyscallAbandoned
}
- w := tl.eventWriter(goStatus, procStatus)
-
- // Emit the status of the P we're stealing. We may have *just* done this when creating the event
- // writer but it's not guaranteed, even if inSyscall is true. Although it might seem like from a
- // syscall context we're always stealing a P for ourselves, we may have not wired it up yet (so
- // it wouldn't be visible to eventWriter) or we may not even intend to wire it up to ourselves
- // at all (e.g. entersyscall_gcwait).
- if !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
- // Careful: don't use the event writer. We never want status or in-progress events
- // to trigger more in-progress events.
- w.w = w.w.writeProcStatus(uint64(pp.id), traceProcSyscallAbandoned, pp.trace.inSweep)
- }
- w.commit(traceEvProcSteal, traceArg(pp.id), pp.trace.nextSeq(tl.gen), traceArg(mStolenFrom))
+ tl.eventWriter(goStatus, procStatus).event(traceEvProcSteal, traceArg(pp.id), pp.trace.nextSeq(tl.gen), traceArg(mStolenFrom))
}
// HeapAlloc emits a HeapAlloc event.
func (tl traceLocker) HeapAlloc(live uint64) {
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapAlloc, traceArg(live))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapAlloc, traceArg(live))
}
// HeapGoal reads the current heap goal and emits a HeapGoal event.
// Heap-based triggering is disabled.
heapGoal = 0
}
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapGoal, traceArg(heapGoal))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapGoal, traceArg(heapGoal))
}
// GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall.
// N.B. We should never trace a status for this goroutine (which we're currently running on),
// since we want this to appear like goroutine creation.
gp.trace.setStatusTraced(tl.gen)
- tl.eventWriter(traceGoBad, traceProcBad).commit(traceEvGoCreateSyscall, traceArg(gp.goid))
+ tl.eventWriter(traceGoBad, traceProcBad).event(traceEvGoCreateSyscall, traceArg(gp.goid))
}
// GoDestroySyscall indicates that a goroutine has transitioned from GoSyscall to dead.
func (tl traceLocker) GoDestroySyscall() {
// N.B. If we trace a status here, we must never have a P, and we must be on a goroutine
// that is in the syscall state.
- tl.eventWriter(traceGoSyscall, traceProcBad).commit(traceEvGoDestroySyscall)
+ tl.eventWriter(traceGoSyscall, traceProcBad).event(traceEvGoDestroySyscall)
}
// To access runtime functions from runtime/trace.
// Need to do this check because the caller won't have it.
return
}
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskBegin, traceArg(id), traceArg(parentID), tl.string(taskType), tl.stack(3))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvUserTaskBegin, traceArg(id), traceArg(parentID), tl.string(taskType), tl.stack(3))
traceRelease(tl)
}
// Need to do this check because the caller won't have it.
return
}
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskEnd, traceArg(id), tl.stack(2))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvUserTaskEnd, traceArg(id), tl.stack(2))
traceRelease(tl)
}
default:
return
}
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(id), tl.string(name), tl.stack(3))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(ev, traceArg(id), tl.string(name), tl.stack(3))
traceRelease(tl)
}
// Need to do this check because the caller won't have it.
return
}
- tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserLog, traceArg(id), tl.string(category), tl.uniqueString(message), tl.stack(3))
+ tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvUserLog, traceArg(id), tl.string(category), tl.uniqueString(message), tl.stack(3))
traceRelease(tl)
}
// as well.
seq := mp.trace.seqlock.Add(1)
if debugTraceReentrancy && seq%2 != 1 {
- throw("bad use of trace.seqlock or tracer is reentrant")
+ throw("bad use of trace.seqlock")
}
systemstack(func() {
lock(&trace.lock)