)
// resource is a generic constraint interface for resource IDs.
-type resource interface{ tracev2.GoID | tracev2.ProcID }
+type resource interface {
+ tracev2.GoID | tracev2.ProcID | tracev2.ThreadID
+}
// noResource indicates the lack of a resource.
const noResource = -1
// unblock indicates that the goroutine gs represents has been unblocked.
func (gs *gState[R]) unblock(ts tracev2.Time, stack tracev2.Stack, resource R, ctx *traceContext) {
- // Unblocking goroutine.
name := "unblock"
viewerResource := uint64(resource)
+ if gs.startBlockReason != "" {
+ name = fmt.Sprintf("%s (%s)", name, gs.startBlockReason)
+ }
if strings.Contains(gs.startBlockReason, "network") {
- // Emit an unblock instant event for the "Network" lane.
+ // Attribute the network instant to the nebulous "NetpollP" if
+ // resource isn't a thread, because there's a good chance that
+ // resource isn't going to be valid in this case.
+ //
+ // TODO(mknyszek): Handle this invalidness in a more general way.
+ if _, ok := any(resource).(tracev2.ThreadID); !ok {
+ // Emit an unblock instant event for the "Network" lane.
+ viewerResource = trace.NetpollP
+ }
ctx.Instant(traceviewer.InstantEvent{
Name: name,
Ts: ctx.elapsed(ts),
- Resource: trace.NetpollP,
+ Resource: viewerResource,
Stack: ctx.Stack(viewerFrames(stack)),
})
- gs.startBlockReason = ""
- viewerResource = trace.NetpollP
}
+ gs.startBlockReason = ""
if viewerResource != 0 {
gs.setStartCause(ts, name, viewerResource, stack)
}
--- /dev/null
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "fmt"
+ "internal/trace/traceviewer"
+ "internal/trace/traceviewer/format"
+ tracev2 "internal/trace/v2"
+)
+
+// threadGenerator emits the "by thread" trace view: one timeline per
+// OS thread. The assertion below verifies it satisfies generator.
+var _ generator = &threadGenerator{}
+
+type threadGenerator struct {
+	globalRangeGenerator
+	globalMetricGenerator
+	stackSampleGenerator[tracev2.ThreadID]
+
+	// gStates tracks per-goroutine state, keyed by goroutine ID; the
+	// viewer resource (timeline) for each gState is a thread ID.
+	gStates map[tracev2.GoID]*gState[tracev2.ThreadID]
+	// threads is the set of threads observed so far; Finish emits one
+	// named timeline per entry.
+	threads map[tracev2.ThreadID]struct{}
+}
+
+// newThreadGenerator returns a ready-to-use threadGenerator with its
+// maps initialized and the stack-sample resource lookup wired to the
+// event's thread ID.
+func newThreadGenerator() *threadGenerator {
+	tg := new(threadGenerator)
+	tg.stackSampleGenerator.getResource = func(ev *tracev2.Event) tracev2.ThreadID {
+		return ev.Thread()
+	}
+	tg.gStates = make(map[tracev2.GoID]*gState[tracev2.ThreadID])
+	tg.threads = make(map[tracev2.ThreadID]struct{})
+	return tg
+}
+
+// Sync notifies the generator of a generation boundary. Only the
+// embedded globalRangeGenerator holds per-generation state to flush.
+func (g *threadGenerator) Sync() {
+	g.globalRangeGenerator.Sync()
+}
+
+// GoroutineLabel applies a goroutine label event to the goroutine it
+// names.
+func (g *threadGenerator) GoroutineLabel(ctx *traceContext, ev *tracev2.Event) {
+	l := ev.Label()
+	// NOTE(review): assumes a gState already exists for this goroutine;
+	// if a label event can precede any transition for the goroutine, the
+	// map lookup yields nil and setLabel may panic — confirm event
+	// ordering guarantees this cannot happen.
+	g.gStates[l.Resource.Goroutine()].setLabel(l.Label)
+}
+
+// GoroutineRange forwards a goroutine-scoped range event (begin,
+// active, or end) to the corresponding goroutine's gState.
+func (g *threadGenerator) GoroutineRange(ctx *traceContext, ev *tracev2.Event) {
+	r := ev.Range()
+	switch ev.Kind() {
+	case tracev2.EventRangeBegin:
+		g.gStates[r.Scope.Goroutine()].rangeBegin(ev.Time(), r.Name, ev.Stack())
+	case tracev2.EventRangeActive:
+		// The range was already in progress when this generation began.
+		g.gStates[r.Scope.Goroutine()].rangeActive(r.Name)
+	case tracev2.EventRangeEnd:
+		gs := g.gStates[r.Scope.Goroutine()]
+		gs.rangeEnd(ev.Time(), r.Name, ev.Stack(), ctx)
+	}
+}
+
+// GoroutineTransition handles a goroutine state-transition event,
+// updating the goroutine's slice on its thread's timeline and noting
+// the transition for the viewer's goroutine-state counts.
+func (g *threadGenerator) GoroutineTransition(ctx *traceContext, ev *tracev2.Event) {
+	// Record the thread this event happened on. Inserting into the set
+	// is idempotent, so no existence check is needed.
+	if ev.Thread() != tracev2.NoThread {
+		g.threads[ev.Thread()] = struct{}{}
+	}
+
+	st := ev.StateTransition()
+	goID := st.Resource.Goroutine()
+
+	// If we haven't seen this goroutine before, create a new
+	// gState for it.
+	gs, ok := g.gStates[goID]
+	if !ok {
+		gs = newGState[tracev2.ThreadID](goID)
+		g.gStates[goID] = gs
+	}
+	// If we haven't already named this goroutine, try to name it.
+	gs.augmentName(st.Stack)
+
+	// Handle the goroutine state transition.
+	from, to := st.Goroutine()
+	if from == to {
+		// Filter out no-op events.
+		return
+	}
+	if from.Executing() && !to.Executing() {
+		if to == tracev2.GoWaiting {
+			// Goroutine started blocking.
+			gs.block(ev.Time(), ev.Stack(), st.Reason, ctx)
+		} else {
+			gs.stop(ev.Time(), ev.Stack(), ctx)
+		}
+	}
+	if !from.Executing() && to.Executing() {
+		start := ev.Time()
+		if from == tracev2.GoUndetermined {
+			// Back-date the event to the start of the trace.
+			start = ctx.startTime
+		}
+		gs.start(start, ev.Thread(), ctx)
+	}
+
+	if from == tracev2.GoWaiting {
+		// Goroutine was unblocked.
+		gs.unblock(ev.Time(), ev.Stack(), ev.Thread(), ctx)
+	}
+	if from == tracev2.GoNotExist && to == tracev2.GoRunnable {
+		// Goroutine was created.
+		gs.created(ev.Time(), ev.Thread(), ev.Stack())
+	}
+	if from == tracev2.GoSyscall {
+		// Exiting syscall.
+		gs.syscallEnd(ev.Time(), to != tracev2.GoRunning, ctx)
+	}
+
+	// Handle syscalls.
+	if to == tracev2.GoSyscall {
+		start := ev.Time()
+		if from == tracev2.GoUndetermined {
+			// Back-date the event to the start of the trace.
+			start = ctx.startTime
+		}
+		// Write down that we've entered a syscall. Note: we might have no P here
+		// if we're in a cgo callback or this is a transition from GoUndetermined
+		// (i.e. the G has been blocked in a syscall).
+		gs.syscallBegin(start, ev.Thread(), ev.Stack())
+	}
+
+	// Note down the goroutine transition.
+	_, inMarkAssist := gs.activeRanges["GC mark assist"]
+	ctx.GoroutineTransition(ctx.elapsed(ev.Time()), viewerGState(from, inMarkAssist), viewerGState(to, inMarkAssist))
+}
+
+// ProcTransition handles a proc state-transition event, emitting
+// "proc start"/"proc stop" instants on the timeline of the thread the
+// event happened on, and updating the running-thread count.
+func (g *threadGenerator) ProcTransition(ctx *traceContext, ev *tracev2.Event) {
+	// Record the thread this event happened on. Inserting into the set
+	// is idempotent, so no existence check is needed.
+	if ev.Thread() != tracev2.NoThread {
+		g.threads[ev.Thread()] = struct{}{}
+	}
+
+	type procArg struct {
+		Proc uint64 `json:"proc,omitempty"`
+	}
+	st := ev.StateTransition()
+	viewerEv := traceviewer.InstantEvent{
+		Resource: uint64(ev.Thread()),
+		Stack:    ctx.Stack(viewerFrames(ev.Stack())),
+		Arg:      procArg{Proc: uint64(st.Resource.Proc())},
+	}
+
+	from, to := st.Proc()
+	if from == to {
+		// Filter out no-op events.
+		return
+	}
+	if to.Executing() {
+		start := ev.Time()
+		if from == tracev2.ProcUndetermined {
+			// Back-date the event to the start of the trace.
+			start = ctx.startTime
+		}
+		viewerEv.Name = "proc start"
+		// NOTE(review): this overwrites the procArg set above with the
+		// thread ID, which already appears as the event's Resource —
+		// confirm the proc ID shouldn't be kept as the argument here.
+		viewerEv.Arg = format.ThreadIDArg{ThreadID: uint64(ev.Thread())}
+		viewerEv.Ts = ctx.elapsed(start)
+		// TODO(mknyszek): We don't have a state machine for threads, so approximate
+		// running threads with running Ps.
+		ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, 1)
+	}
+	if from.Executing() {
+		start := ev.Time()
+		viewerEv.Name = "proc stop"
+		viewerEv.Ts = ctx.elapsed(start)
+		// TODO(mknyszek): We don't have a state machine for threads, so approximate
+		// running threads with running Ps.
+		ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, -1)
+	}
+	// TODO(mknyszek): Consider modeling procs differently and have them be
+	// transition to and from NotExist when GOMAXPROCS changes. We can emit
+	// events for this to clearly delineate GOMAXPROCS changes.
+
+	if viewerEv.Name != "" {
+		ctx.Instant(viewerEv)
+	}
+}
+
+// ProcRange handles proc-scoped range events. It is currently a no-op
+// for the thread view; see the TODO below.
+func (g *threadGenerator) ProcRange(ctx *traceContext, ev *tracev2.Event) {
+	// TODO(mknyszek): Extend procRangeGenerator to support rendering proc ranges on threads.
+}
+
+// Finish flushes all remaining state to the emitter: it closes out
+// global ranges and per-goroutine slices, then registers a named
+// timeline for every thread observed during the trace.
+func (g *threadGenerator) Finish(ctx *traceContext) {
+	ctx.SetResourceType("OS THREADS")
+
+	// Finish off global ranges.
+	g.globalRangeGenerator.Finish(ctx)
+
+	// Finish off all the goroutine slices.
+	for _, gs := range g.gStates {
+		gs.finish(ctx)
+	}
+
+	// Name each thread for the emitter.
+	for id := range g.threads {
+		ctx.Resource(uint64(id), fmt.Sprintf("Thread %d", id))
+	}
+}
"strings"
)
-func MainHandler(ranges []Range) http.Handler {
+func MainHandler(views []View) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
- if err := templMain.Execute(w, ranges); err != nil {
+ if err := templMain.Execute(w, views); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
</p>
<h2>Event timelines for running goroutines</h2>
-{{if $}}
+{{range $i, $view := $}}
+{{if $view.Ranges}}
+{{if eq $i 0}}
<p>
Large traces are split into multiple sections of equal data size
(not duration) to avoid overwhelming the visualizer.
</p>
+{{end}}
<ul>
- {{range $e := $}}
- <li><a href="{{$e.URL}}">View trace ({{$e.Name}})</a></li>
+ {{range $index, $e := $view.Ranges}}
+ <li><a href="{{$view.URL $index}}">View trace by {{$view.Type}} ({{$e.Name}})</a></li>
{{end}}
</ul>
{{else}}
<ul>
- <li><a href="/trace">View trace</a></li>
+ <li><a href="{{$view.URL -1}}">View trace by {{$view.Type}}</a></li>
</ul>
{{end}}
+{{end}}
<p>
- This view displays a timeline for each of the GOMAXPROCS logical
- processors, showing which goroutine (if any) was running on that
+ This view displays a series of timelines for a type of resource.
+ The "by proc" view consists of a timeline for each of the GOMAXPROCS
+ logical processors, showing which goroutine (if any) was running on that
logical processor at each moment.
+ The "by thread" view (if available) consists of a similar timeline for each
+ OS thread.
Each goroutine has an identifying number (e.g. G123), main function,
and color.
</html>
`))
+// View describes one way of rendering a trace: a resource type to key
+// the timelines by, plus the trace's split ranges, if any.
+type View struct {
+	Type   ViewType
+	Ranges []Range
+}
+
+// ViewType identifies the resource that a View's timelines represent.
+type ViewType string
+
+const (
+	ViewProc   ViewType = "proc"
+	ViewThread ViewType = "thread"
+)
+
+// URL returns the URL for the rangeIdx'th range of this view, or for
+// the whole (unsplit) trace if rangeIdx is negative.
+func (v View) URL(rangeIdx int) string {
+	if rangeIdx < 0 {
+		return fmt.Sprintf("/trace?view=%s", v.Type)
+	}
+	return v.Ranges[rangeIdx].URL(v.Type)
+}
+
type Range struct {
Name string
Start int
EndTime int64
}
-func (r Range) URL() string {
-	return fmt.Sprintf("/trace?start=%d&end=%d", r.Start, r.End)
+// URL returns the trace URL for this range under the given view type.
+func (r Range) URL(viewType ViewType) string {
+	return fmt.Sprintf("/trace?view=%s&start=%d&end=%d", viewType, r.Start, r.End)
}
func TraceHandler() http.Handler {