runtime: allow experimental trace batches to be reused
author     Austin Clements <austin@google.com>
           Mon, 24 Jun 2024 16:19:00 +0000 (12:19 -0400)
committer  Gopher Robot <gobot@golang.org>
           Fri, 26 Jul 2024 18:46:28 +0000 (18:46 +0000)
Currently, we can only cache regular trace event buffers on each M. As
a result, calling unsafeTraceExpWriter will, in effect, always return
a new trace batch, with all of the overhead that entails.

This extends that cache to support buffers for experimental trace
data. This way, unsafeTraceExpWriter can return a partially used
buffer, which the caller can continue to extend. This gives the caller
control over when these buffers get flushed and reuses all of the
existing trace buffering machinery.

This also simplifies the experimental batch infrastructure a bit:
traceWriter now needs to know the experiment ID anyway, so there is no
need for a separate traceExpWriter type.
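
To make the new layout concrete, here is a minimal standalone sketch of the
caching scheme described above: one buffer slot per generation parity and per
experiment, so asking for an experiment's writer hands back any partially
filled buffer instead of a fresh batch. The names perM, expBuf, writerFor,
and numExperiments are illustrative stand-ins for the runtime's
mp.trace.buf[gen%2][exp] array, traceBuf, and traceNumExperiments, not the
actual implementation.

package main

import "fmt"

const numExperiments = 2 // stand-in for traceNumExperiments

// expBuf is a stand-in for the runtime's traceBuf.
type expBuf struct {
    data []byte
}

// perM models per-M trace state: one cached buffer per generation parity and
// per experiment, mirroring buf [2][traceNumExperiments]*traceBuf.
type perM struct {
    buf [2][numExperiments]*expBuf
}

// writerFor returns the cached buffer for (gen, exp), allocating one only
// when the slot is empty, so repeated calls keep extending the same buffer.
func (m *perM) writerFor(gen uintptr, exp int) *expBuf {
    slot := &m.buf[gen%2][exp]
    if *slot == nil {
        *slot = &expBuf{}
    }
    return *slot
}

func main() {
    var m perM
    w1 := m.writerFor(7, 1)
    w1.data = append(w1.data, 0xAB)
    w2 := m.writerFor(7, 1)             // same generation and experiment
    fmt.Println(w1 == w2, len(w2.data)) // prints: true 1
}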

Change-Id: Idc2100176c5d02e0fbb229dc8aa4aea2b1cf5231
Reviewed-on: https://go-review.googlesource.com/c/go/+/594595
Auto-Submit: Austin Clements <austin@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>

src/runtime/trace.go
src/runtime/tracebuf.go
src/runtime/traceexp.go
src/runtime/traceruntime.go
src/runtime/tracetype.go

src/runtime/trace.go
index adf7b0951dce4b414d4c8d8fb23eaf709a9ef111..bc2978bb4bf6ef3f187a48b7e021fc14fe9f1047 100644 (file)
@@ -524,10 +524,11 @@ func traceAdvance(stopTrace bool) {
                                // trace.lock needed for traceBufFlush, but also to synchronize
                                // with traceThreadDestroy, which flushes both buffers unconditionally.
                                lock(&trace.lock)
-                               bufp := &mp.trace.buf[gen%2]
-                               if *bufp != nil {
-                                       traceBufFlush(*bufp, gen)
-                                       *bufp = nil
+                               for exp, buf := range mp.trace.buf[gen%2] {
+                                       if buf != nil {
+                                               traceBufFlush(buf, gen)
+                                               mp.trace.buf[gen%2][exp] = nil
+                                       }
                                }
                                unlock(&trace.lock)
 
src/runtime/tracebuf.go
index be6e3e582bd57263ee42677473ad13530049a1ef..0849a57809e2ca7eeeff218feec52ba3a1715c6c 100644 (file)
@@ -24,6 +24,7 @@ const traceBytesPerNumber = 10
 // we can change it if it's deemed too error-prone.
 type traceWriter struct {
        traceLocker
+       exp traceExperiment
        *traceBuf
 }
 
@@ -47,7 +48,7 @@ func (tl traceLocker) writer() traceWriter {
                        gp.throwsplit = true
                }
        }
-       return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2]}
+       return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2][traceNoExperiment]}
 }
 
 // unsafeTraceWriter produces a traceWriter that doesn't lock the trace.
@@ -105,7 +106,7 @@ func (w traceWriter) end() {
                // less error-prone.
                return
        }
-       w.mp.trace.buf[w.gen%2] = w.traceBuf
+       w.mp.trace.buf[w.gen%2][w.exp] = w.traceBuf
        if debugTraceReentrancy {
                // The writer is no longer live, we can drop throwsplit (if it wasn't
                // already set upon entry).
@@ -127,7 +128,7 @@ func (w traceWriter) end() {
 func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
        refill := w.traceBuf == nil || !w.available(maxSize)
        if refill {
-               w = w.refill(traceNoExperiment)
+               w = w.refill()
        }
        return w, refill
 }
@@ -151,14 +152,7 @@ func (w traceWriter) flush() traceWriter {
 }
 
 // refill puts w.traceBuf on the queue of full buffers and refresh's w's buffer.
-//
-// exp indicates whether the refilled batch should be EvExperimentalBatch.
-//
-// nosplit because it's part of writing an event for an M, which must not
-// have any stack growth.
-//
-//go:nosplit
-func (w traceWriter) refill(exp traceExperiment) traceWriter {
+func (w traceWriter) refill() traceWriter {
        systemstack(func() {
                lock(&trace.lock)
                if w.traceBuf != nil {
@@ -192,11 +186,11 @@ func (w traceWriter) refill(exp traceExperiment) traceWriter {
        }
 
        // Write the buffer's header.
-       if exp == traceNoExperiment {
+       if w.exp == traceNoExperiment {
                w.byte(byte(traceEvEventBatch))
        } else {
                w.byte(byte(traceEvExperimentalBatch))
-               w.byte(byte(exp))
+               w.byte(byte(w.exp))
        }
        w.varint(uint64(w.gen))
        w.varint(uint64(mID))
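
As a rough illustration of the header logic just above: with the experiment
stored on the writer itself, refill can pick the batch header from w.exp
alone. The constants and batchHeader below are hypothetical stand-ins (the
real byte values come from the runtime's trace event definitions), and
binary.AppendUvarint stands in for the writer's varint method.

package main

import (
    "encoding/binary"
    "fmt"
)

// Illustrative stand-ins; the real values live in the runtime's trace tables.
const (
    evEventBatch        byte = 0x01 // stand-in for traceEvEventBatch
    evExperimentalBatch byte = 0x02 // stand-in for traceEvExperimentalBatch
    noExperiment        byte = 0    // stand-in for traceNoExperiment
)

// batchHeader mirrors the shape of what refill writes: a plain event-batch
// byte, or an experimental-batch byte followed by the experiment ID, then
// the generation and M ID as varints.
func batchHeader(exp byte, gen, mID uint64) []byte {
    var b []byte
    if exp == noExperiment {
        b = append(b, evEventBatch)
    } else {
        b = append(b, evExperimentalBatch, exp)
    }
    b = binary.AppendUvarint(b, gen)
    b = binary.AppendUvarint(b, mID)
    return b
}

func main() {
    fmt.Printf("%x\n", batchHeader(noExperiment, 3, 12)) // 01030c
    fmt.Printf("%x\n", batchHeader(1, 3, 12))            // 0201030c
}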
src/runtime/traceexp.go
index 1438191a9137c1033bdcccf15854e927851e6a82..13eec0c0b667062325c652276bb745cbc1929f06 100644 (file)
@@ -4,15 +4,15 @@
 
 package runtime
 
-// traceExpWriter is a wrapper around trace writer that produces traceEvExperimentalBatch
-// batches. This means that the data written to the writer need not conform to the standard
-// trace format.
-type traceExpWriter struct {
-       traceWriter
-       exp traceExperiment
+// expWriter returns a traceWriter that writes into the current M's stream for
+// the given experiment.
+func (tl traceLocker) expWriter(exp traceExperiment) traceWriter {
+       return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2][exp], exp: exp}
 }
 
-// unsafeTraceExpWriter produces a traceExpWriter that doesn't lock the trace.
+// unsafeTraceExpWriter produces a traceWriter for experimental trace batches
+// that doesn't lock the trace. Data written to experimental batches need not
+// conform to the standard trace format.
 //
 // It should only be used in contexts where either:
 // - Another traceLocker is held.
@@ -21,19 +21,8 @@ type traceExpWriter struct {
 // This does not have the same stack growth restrictions as traceLocker.writer.
 //
 // buf may be nil.
-func unsafeTraceExpWriter(gen uintptr, buf *traceBuf, exp traceExperiment) traceExpWriter {
-       return traceExpWriter{traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}, exp}
-}
-
-// ensure makes sure that at least maxSize bytes are available to write.
-//
-// Returns whether the buffer was flushed.
-func (w traceExpWriter) ensure(maxSize int) (traceExpWriter, bool) {
-       refill := w.traceBuf == nil || !w.available(maxSize)
-       if refill {
-               w.traceWriter = w.traceWriter.refill(w.exp)
-       }
-       return w, refill
+func unsafeTraceExpWriter(gen uintptr, buf *traceBuf, exp traceExperiment) traceWriter {
+       return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf, exp: exp}
 }
 
 // traceExperiment is an enumeration of the different kinds of experiments supported for tracing.
@@ -45,6 +34,10 @@ const (
 
        // traceExperimentAllocFree is an experiment to add alloc/free events to the trace.
        traceExperimentAllocFree
+
+       // traceNumExperiments is the number of trace experiments (and 1 higher than
+       // the highest numbered experiment).
+       traceNumExperiments
 )
 
 // Experimental events.
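
The traceNumExperiments constant added above uses a common Go idiom: the
trailing value of an iota block counts the values before it, so it can size
per-experiment arrays at compile time. A small, hypothetical example of the
same pattern:

package main

import "fmt"

// A hypothetical enum using the same trailing-sentinel pattern as
// traceNumExperiments: the final constant equals the number of values before
// it, one higher than the highest numbered experiment.
type experiment uint8

const (
    noExperiment experiment = iota
    experimentAllocFree
    numExperiments
)

func main() {
    // One slot per experiment, including the "no experiment" slot.
    var perExperiment [numExperiments][]byte
    fmt.Println(len(perExperiment)) // prints: 2
}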
src/runtime/traceruntime.go
index 5808fb005037f1ee1088a54feff1ec4ee0cfd17c..dfbf183de5ba7c77dff827acf7550d2c1cf897fa 100644 (file)
@@ -24,11 +24,11 @@ func (s *gTraceState) reset() {
 
 // mTraceState is per-M state for the tracer.
 type mTraceState struct {
-       seqlock       atomic.Uintptr // seqlock indicating that this M is writing to a trace buffer.
-       buf           [2]*traceBuf   // Per-M traceBuf for writing. Indexed by trace.gen%2.
-       link          *m             // Snapshot of alllink or freelink.
-       reentered     uint32         // Whether we've reentered tracing from within tracing.
-       oldthrowsplit bool           // gp.throwsplit upon calling traceLocker.writer. For debugging.
+       seqlock       atomic.Uintptr                    // seqlock indicating that this M is writing to a trace buffer.
+       buf           [2][traceNumExperiments]*traceBuf // Per-M traceBuf for writing. Indexed by trace.gen%2.
+       link          *m                                // Snapshot of alllink or freelink.
+       reentered     uint32                            // Whether we've reentered tracing from within tracing.
+       oldthrowsplit bool                              // gp.throwsplit upon calling traceLocker.writer. For debugging.
 }
 
 // pTraceState is per-P state for the tracer.
@@ -691,11 +691,13 @@ func traceThreadDestroy(mp *m) {
        systemstack(func() {
                lock(&trace.lock)
                for i := range mp.trace.buf {
-                       if mp.trace.buf[i] != nil {
-                               // N.B. traceBufFlush accepts a generation, but it
-                               // really just cares about gen%2.
-                               traceBufFlush(mp.trace.buf[i], uintptr(i))
-                               mp.trace.buf[i] = nil
+                       for exp, buf := range mp.trace.buf[i] {
+                               if buf != nil {
+                                       // N.B. traceBufFlush accepts a generation, but it
+                                       // really just cares about gen%2.
+                                       traceBufFlush(buf, uintptr(i))
+                                       mp.trace.buf[i][exp] = nil
+                               }
                        }
                }
                unlock(&trace.lock)
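
The nested loop above, like the matching one in traceAdvance, now has to
visit every experiment slot rather than a single per-generation buffer. The
following standalone sketch models that flush-and-clear pattern; the buf
type, flushAll, and the flush callback are hypothetical stand-ins for
traceBuf and traceBufFlush.

package main

import "fmt"

const numExperiments = 2 // stand-in for traceNumExperiments

type buf struct{ data []byte }

// flushAll walks both generation slots and every experiment slot, flushing
// and clearing any buffer it finds, mirroring the shape of the loops in
// traceThreadDestroy and traceAdvance after this change.
func flushAll(bufs *[2][numExperiments]*buf, flush func(*buf, uintptr)) {
    for i := range bufs {
        for exp, b := range bufs[i] {
            if b != nil {
                flush(b, uintptr(i)) // the flush only cares about gen%2
                bufs[i][exp] = nil
            }
        }
    }
}

func main() {
    var bufs [2][numExperiments]*buf
    bufs[1][0] = &buf{data: []byte{1, 2}}
    bufs[1][1] = &buf{data: []byte{3}}
    flushAll(&bufs, func(b *buf, gen uintptr) {
        fmt.Println("flushed", len(b.data), "bytes for gen%2 =", gen)
    })
    fmt.Println(bufs[1][0] == nil, bufs[1][1] == nil) // prints: true true
}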
src/runtime/tracetype.go
index b27a6909168b65056ad2749824d2f35c9f275857..d9e340f64a4cb194fecb362095d827623b00aae7 100644 (file)
@@ -43,7 +43,7 @@ func (t *traceTypeTable) dump(gen uintptr) {
        t.tab.reset()
 }
 
-func dumpTypesRec(node *traceMapNode, w traceExpWriter) traceExpWriter {
+func dumpTypesRec(node *traceMapNode, w traceWriter) traceWriter {
        typ := (*abi.Type)(*(*unsafe.Pointer)(unsafe.Pointer(&node.data[0])))
        typName := toRType(typ).string()