trace.timeStart = nanotime()
trace.headerWritten = false
trace.footerWritten = false
- trace.strings = make(map[string]uint64)
+
+ // string-to-id mapping:
+ //  0: reserved for the empty string
+ //  remaining: ids assigned to other strings by traceString
trace.stringSeq = 0
+ trace.strings = make(map[string]uint64)
+
trace.seqGC = 0
_g_.m.startingtrace = false
trace.enabled = true
// Register runtime goroutine labels.
_, pid, bufp := traceAcquireBuffer()
- buf := (*bufp).ptr()
- if buf == nil {
- buf = traceFlush(0).ptr()
- (*bufp).set(buf)
- }
for i, label := range gcMarkWorkerModeStrings[:] {
- trace.markWorkerLabels[i], buf = traceString(buf, label)
+ trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
}
traceReleaseBuffer(pid)
buf := (*bufp).ptr()
const maxSize = 2 + 5*traceBytesPerNumber // event type, length, sequence, timestamp, stack id, and two additional params
if buf == nil || len(buf.arr)-buf.pos < maxSize {
- buf = traceFlush(traceBufPtrOf(buf)).ptr()
+ buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
(*bufp).set(buf)
}
ticks := uint64(cputicks()) / traceTickDiv
tickDiff := ticks - buf.lastTicks
- if buf.pos == 0 {
- buf.byte(traceEvBatch | 1<<traceArgCountShift)
- buf.varint(uint64(pid))
- buf.varint(ticks)
- tickDiff = 0
- }
buf.lastTicks = ticks
narg := byte(len(args))
if skip >= 0 {
}
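
A note on the maxSize arithmetic above: traceBytesPerNumber is the worst-case size of one varint-encoded number, ceil(64/7) = 10 bytes, so two header bytes plus five numbers bound the record size. A minimal sketch of the little-endian-base-128 encoding the tracer uses (the runtime's version lives in traceBuf.varint / traceAppend):

func appendVarint(buf []byte, v uint64) []byte {
	// Emit 7 payload bits per byte, high bit set on all but the last
	// byte; a uint64 thus needs at most 10 bytes (traceBytesPerNumber).
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	return append(buf, byte(v))
}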
// traceFlush puts buf onto the stack of full buffers and returns a fresh buffer, initialized with a batch header.
-func traceFlush(buf traceBufPtr) traceBufPtr {
+func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
owner := trace.lockOwner
dolock := owner == nil || owner != getg().m.curg
if dolock {
bufp := buf.ptr()
bufp.link.set(nil)
bufp.pos = 0
- bufp.lastTicks = 0
+
+ // Initialize the buffer for a new batch: every buffer now begins
+ // with a batch header (event type, P id, base timestamp).
+ ticks := uint64(cputicks()) / traceTickDiv
+ bufp.lastTicks = ticks
+ bufp.byte(traceEvBatch | 1<<traceArgCountShift)
+ bufp.varint(uint64(pid))
+ bufp.varint(ticks)
+
if dolock {
unlock(&trace.lock)
}
return buf
}
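
With this change, every buffer returned by traceFlush already begins with a batch header, so a reader can never observe events that precede one. A minimal decoding sketch of the layout written above, one event byte followed by two varints (not the trace parser's actual API; readBatchHeader is hypothetical):

// readVarint decodes one LEB128 number (see the encoding sketch above).
func readVarint(b []byte) (v uint64, rest []byte) {
	for shift := uint(0); ; shift += 7 {
		x := b[0]
		b = b[1:]
		v |= uint64(x&0x7f) << shift
		if x&0x80 == 0 {
			return v, b
		}
	}
}

// readBatchHeader shows the batch layout traceFlush writes:
// traceEvBatch byte, then the P id and base timestamp as varints.
func readBatchHeader(b []byte) (pid int32, ticks uint64, rest []byte) {
	p, b := readVarint(b[1:]) // b[0] is traceEvBatch | 1<<traceArgCountShift
	t, b := readVarint(b)
	return int32(p), t, b
}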
-func traceString(buf *traceBuf, s string) (uint64, *traceBuf) {
+// traceString adds a string to trace.strings and returns its id.
+func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
if s == "" {
- return 0, buf
+ return 0, bufp
}
if id, ok := trace.strings[s]; ok {
- return id, buf
+ return id, bufp
}
trace.stringSeq++
id := trace.stringSeq
trace.strings[s] = id
+ // The memory allocation above may trigger tracing and cause
+ // *bufp to change. The code below works with *bufp, so there
+ // must be no memory allocation or any other activity that
+ // triggers tracing after this point.
+
+ buf := (*bufp).ptr()
size := 1 + 2*traceBytesPerNumber + len(s)
- if len(buf.arr)-buf.pos < size {
- buf = traceFlush(traceBufPtrOf(buf)).ptr()
+ if buf == nil || len(buf.arr)-buf.pos < size {
+ buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
+ (*bufp).set(buf)
}
buf.byte(traceEvString)
buf.varint(id)
buf.varint(uint64(len(s)))
buf.pos += copy(buf.arr[buf.pos:], s)
- return id, buf
+
+ (*bufp).set(buf)
+ return id, bufp
}
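
The switch from *traceBuf to *traceBufPtr is the heart of the fix: traceFlush may hand back a different buffer, and with double indirection traceString publishes that swap through the caller's slot instead of leaving the caller with a stale local. A stripped-down sketch of the pattern (all names hypothetical):

type buffer struct {
	arr [64]byte
	pos int
}

// appendByte swaps in a fresh buffer when the current one is full
// (standing in for traceFlush) and publishes the swap through the
// caller's slot, so the caller's view never goes stale.
func appendByte(bufp **buffer, b byte) {
	buf := *bufp
	if buf == nil || buf.pos == len(buf.arr) {
		buf = new(buffer)
		*bufp = buf
	}
	buf.arr[buf.pos] = b
	buf.pos++
}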
// traceAppend appends v to buf in little-endian-base-128 encoding.
// dump writes all previously cached stack records to the buffer,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
- buf := traceFlush(0).ptr()
+ bufp := traceFlush(0, 0)
for _, stk := range tab.tab {
stk := stk.ptr()
for ; stk != nil; stk = stk.link.ptr() {
tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
for _, f := range frames {
var frame traceFrame
- frame, buf = traceFrameForPC(buf, f)
+ frame, bufp = traceFrameForPC(bufp, 0, f)
tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
}
// Now copy to the buffer.
size := 1 + traceBytesPerNumber + len(tmpbuf)
- if len(buf.arr)-buf.pos < size {
- buf = traceFlush(traceBufPtrOf(buf)).ptr()
+ if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
+ bufp = traceFlush(bufp, 0)
}
+ buf := bufp.ptr()
buf.byte(traceEvStack | 3<<traceArgCountShift)
buf.varint(uint64(len(tmpbuf)))
buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
}
lock(&trace.lock)
- traceFullQueue(traceBufPtrOf(buf))
+ traceFullQueue(bufp)
unlock(&trace.lock)
tab.mem.drop()
line uint64
}
-func traceFrameForPC(buf *traceBuf, f Frame) (traceFrame, *traceBuf) {
+// traceFrameForPC records the frame information.
+// It may allocate memory.
+func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
+ bufp := &buf
var frame traceFrame
fn := f.Function
if len(fn) > maxLen {
fn = fn[len(fn)-maxLen:]
}
- frame.funcID, buf = traceString(buf, fn)
+ frame.funcID, bufp = traceString(bufp, pid, fn)
frame.line = uint64(f.Line)
file := f.File
if len(file) > maxLen {
file = file[len(file)-maxLen:]
}
- frame.fileID, buf = traceString(buf, file)
- return frame, buf
+ frame.fileID, bufp = traceString(bufp, pid, file)
+ return frame, *bufp
}
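
The truncation above keeps the last maxLen bytes rather than the first, since the tail of a qualified function or file name is its most identifying part. A hedged one-liner of the same idea (truncateSuffix is illustrative, not a runtime helper):

// truncateSuffix mirrors the suffix-keeping truncation in traceFrameForPC.
func truncateSuffix(s string, maxLen int) string {
	if len(s) > maxLen {
		return s[len(s)-maxLen:]
	}
	return s
}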
// traceAlloc is a non-thread-safe region allocator.
import (
"bytes"
"flag"
+ "internal/race"
"internal/trace"
"io"
"io/ioutil"
"os"
"runtime"
. "runtime/trace"
+ "strconv"
"sync"
"testing"
"time"
saveTraces = flag.Bool("savetraces", false, "save traces collected by tests")
)
+// TestEventBatch tests that a Flush that happens during Start
+// doesn't produce a corrupted trace.
+func TestEventBatch(t *testing.T) {
+ if race.Enabled {
+ t.Skip("skipping in race mode")
+ }
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ // During Start, a bunch of records are written to capture the current
+ // snapshot of the program, including the state of each goroutine.
+ // Some string constants are also written to the trace to aid
+ // parsing. This test checks that a Flush of the buffer that occurs
+ // during this process doesn't produce a corrupted trace.
+ // Exactly when a Flush happens during Start is hard to control,
+ // so we test with a range of goroutine counts, hoping that one
+ // of them triggers a Flush.
+ // This range was chosen to fill up a ~64KB buffer with traceEvGoCreate
+ // and traceEvGoWaiting events (12-13 bytes per goroutine).
+ for g := 4950; g < 5050; g++ {
+ n := g
+ t.Run("G="+strconv.Itoa(n), func(t *testing.T) {
+ var wg sync.WaitGroup
+ wg.Add(n)
+
+ in := make(chan bool, 1000)
+ for i := 0; i < n; i++ {
+ go func() {
+ <-in
+ wg.Done()
+ }()
+ }
+ buf := new(bytes.Buffer)
+ if err := Start(buf); err != nil {
+ t.Fatalf("failed to start tracing: %v", err)
+ }
+
+ for i := 0; i < n; i++ {
+ in <- true
+ }
+ wg.Wait()
+ Stop()
+
+ _, err := trace.Parse(buf, "")
+ if err == trace.ErrTimeOrder {
+ t.Skipf("skipping trace: %v", err)
+ }
+
+ if err != nil {
+ t.Fatalf("failed to parse trace: %v", err)
+ }
+ })
+ }
+}
+
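
A rough check of the sweep bounds above: at 12-13 bytes per goroutine, a ~64KB buffer holds events for roughly 65536/13 ≈ 5041 to 65536/12 ≈ 5461 goroutines, so scanning 4950..5050 should push at least one run across a buffer boundary, forcing a flush in the middle of Start.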
func TestTraceStartStop(t *testing.T) {
buf := new(bytes.Buffer)
if err := Start(buf); err != nil {