Fixes #15490.
Change-Id: I6ce9edc46398030ff639e22d4ca4adebccdfe1b7
Reviewed-on: https://go-review.googlesource.com/c/go/+/690399
Auto-Submit: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
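For readers of this change, a minimal sketch of how the new metric can be consumed through the existing runtime/metrics API; the metric name and its KindUint64 kind come from the diff below, everything else is illustrative:

    package main

    import (
    	"fmt"
    	"runtime/metrics"
    )

    func main() {
    	// Sample the new thread-count metric added by this change.
    	s := []metrics.Sample{{Name: "/sched/threads/total:threads"}}
    	metrics.Read(s)

    	// The metric is a KindUint64 gauge: the current number of live
    	// threads owned by the Go runtime. KindBad means the metric is
    	// not known to this runtime version.
    	if s[0].Value.Kind() == metrics.KindUint64 {
    		fmt.Println("runtime-owned threads:", s[0].Value.Uint64())
    	}
    }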
sched.stwTotalTimeOther.write(out)
},
},
+ "/sched/threads/total:threads": {
+ deps: makeStatDepSet(schedStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(in.schedStats.threads)
+ },
+ },
"/sync/mutex/wait/total:seconds": {
compute: func(_ *statAggregate, out *metricValue) {
out.kind = metricKindFloat64
gNonGo uint64
gWaiting uint64
gCreated uint64
+ threads uint64
}
// compute populates the schedStatsAggregate with values from the runtime.
// approximate.
lock(&sched.lock)
+ // The total count of threads owned by Go is the number of Ms,
+ // minus the extra Ms that are in use or idle on the extra M list,
+ // since those correspond to threads not created by the Go runtime.
+ a.threads = uint64(mcount()) - uint64(extraMInUse.Load()) - uint64(extraMLength.Load())
+
// Collect running/runnable from per-P run queues.
a.gCreated += sched.goroutinesCreated.Load()
for _, p := range allp {
Kind: KindFloat64Histogram,
Cumulative: true,
},
+ {
+ Name: "/sched/threads/total:threads",
+ Description: "The current count of live threads that are owned by the Go runtime.",
+ Kind: KindUint64,
+ },
{
Name: "/sync/mutex/wait/total:seconds",
Description: "Approximate cumulative time goroutines have spent blocked on a sync.Mutex, sync.RWMutex, or runtime-internal lock. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data.",
/sched/pauses/stopping/other:seconds). Bucket counts increase
monotonically.
+ /sched/threads/total:threads
+ The current count of live threads that are owned by the Go
+ runtime.
+
/sync/mutex/wait/total:seconds
Approximate cumulative time goroutines have spent blocked on a
sync.Mutex, sync.RWMutex, or runtime-internal lock. This metric
running
waiting
created
+ threads
+ numSamples
)
- var s [5]metrics.Sample
+ var s [numSamples]metrics.Sample
s[notInGo].Name = "/sched/goroutines/not-in-go:goroutines"
s[runnable].Name = "/sched/goroutines/runnable:goroutines"
s[running].Name = "/sched/goroutines/running:goroutines"
s[waiting].Name = "/sched/goroutines/waiting:goroutines"
s[created].Name = "/sched/goroutines-created:goroutines"
+ s[threads].Name = "/sched/threads/total:threads"
logMetrics := func(t *testing.T, s []metrics.Sample) {
for i := range s {
// goroutines.
const waitingSlack = 100
+ // threadsSlack is the maximum number of additional threads expected to
+ // be left over from other tests and from the runtime itself (sysmon,
+ // the template thread, etc.).
+ const threadsSlack = 20
+
// Make sure GC isn't running, since GC workers interfere with
// expected counts.
defer debug.SetGCPercent(debug.SetGCPercent(-1))
}, time.Second)
logMetrics(t, s[:])
check(t, &s[running], count, count+4)
+ check(t, &s[threads], count, count+4+threadsSlack)
})
// Force runnable count to be high.
t.Run("running", func(t *testing.T) {
logMetrics(t, s[:])
checkEq(t, &s[running], 1)
+ checkEq(t, &s[threads], 1)
})
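The check and checkEq helpers are defined elsewhere in the test file and are not part of this hunk. Purely as a reading aid, a sketch of what checkEq plausibly looks like; the name is taken from the call sites above, but the signature and body are assumed:

    package metrics_test

    import (
    	"runtime/metrics"
    	"testing"
    )

    // checkEq is a hypothetical stand-in for the helper used above: it
    // asserts that a sampled uint64 metric has exactly the expected value.
    func checkEq(t *testing.T, s *metrics.Sample, want uint64) {
    	t.Helper()
    	if got := s.Value.Uint64(); got != want {
    		t.Errorf("%s: got %d, want %d", s.Name, got, want)
    	}
    }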
t.Run("runnable", func(t *testing.T) {
logMetrics(t, s[:])
// when it is just in a register or thread-local storage.
mp.alllink = allm
- // NumCgoCall() and others iterate over allm w/o schedlock,
+ // NumCgoCall and others iterate over allm without holding sched.lock,
// so we need to publish it safely.
atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
unlock(&sched.lock)
}
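The comment here captures the safe-publication idiom: link the new M in, then store the list head atomically so readers (e.g. NumCgoCall) can walk allm without taking sched.lock. A self-contained sketch of the same idiom outside the runtime, using sync/atomic; the node type and helpers are illustrative, not runtime code:

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    )

    // node mimics the allm pattern: a singly linked list whose head is
    // published atomically so readers can traverse it without the writers' lock.
    type node struct {
    	id   int
    	next *node // set before publication, immutable afterwards
    }

    var (
    	mu   sync.Mutex           // writers serialize here (stand-in for sched.lock)
    	head atomic.Pointer[node] // readers load the head atomically
    )

    // add prepends a node under the lock and publishes the new head with an
    // atomic store, so a concurrent reader never sees a half-initialized node.
    func add(id int) {
    	mu.Lock()
    	n := &node{id: id, next: head.Load()}
    	head.Store(n)
    	mu.Unlock()
    }

    // count walks the list without the lock, the way NumCgoCall walks allm.
    func count() int {
    	c := 0
    	for n := head.Load(); n != nil; n = n.next {
    		c++
    	}
    	return c
    }

    func main() {
    	for i := 0; i < 3; i++ {
    		add(i)
    	}
    	fmt.Println(count()) // 3
    }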
var (
- allm *m
+ // allm is the linked list of all Ms, linked through m.alllink.
+ // It is written under sched.lock and read atomically.
+ allm *m
+
gomaxprocs int32
numCPUStartup int32
forcegc forcegcstate