sp := sys.GetCallerSP()
callbackUpdateSystemStack(mp, sp, signal)
- // Should mark we are already in Go now.
+ // We must mark that we are already in Go now.
// Otherwise, we may call needm again when we get a signal, before cgocallbackg1,
// which means the extram list may be empty, that will cause a deadlock.
mp.isExtraInC = false
// mp.curg is now a real goroutine.
casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
sched.ngsys.Add(-1)
- sched.nGsyscallNoP.Add(1)
+ // N.B. We do not update nGsyscallNoP, because isExtraInC threads are not
+ // counted as real goroutines while they're in C.
if !signal {
if trace.ok() {
casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
mp.curg.preemptStop = false
sched.ngsys.Add(1)
- sched.nGsyscallNoP.Add(-1)
+ decGSyscallNoP(mp)
if !mp.isExtraInSig {
if trace.ok() {
if trace.ok() {
trace.ProcStop(pp)
}
- sched.nGsyscallNoP.Add(1)
+ addGSyscallNoP(gp.m) // We gave up our P voluntarily.
pp.gcStopTime = nanotime()
pp.syscalltick++
if sched.stopwait--; sched.stopwait == 0 {
gp.m.syscalltick = gp.m.p.ptr().syscalltick
gp.m.p.ptr().syscalltick++
- sched.nGsyscallNoP.Add(1)
+ addGSyscallNoP(gp.m) // We're going to give up our P.
// Leave SP around for GC and traceback.
pc := sys.GetCallerPC()
if oldp != nil {
if thread, ok := setBlockOnExitSyscall(oldp); ok {
thread.takeP()
+ addGSyscallNoP(thread.mp) // takeP does the opposite, but this is a net zero change.
thread.resume()
- sched.nGsyscallNoP.Add(-1) // takeP adds 1.
return oldp
}
}
}
unlock(&sched.lock)
if pp != nil {
- sched.nGsyscallNoP.Add(-1)
+ decGSyscallNoP(getg().m) // We got a P for ourselves.
return pp
}
}
trace.GoSysExit(true)
traceRelease(trace)
}
- sched.nGsyscallNoP.Add(-1)
+ decGSyscallNoP(getg().m)
dropg()
lock(&sched.lock)
var pp *p
schedule() // Never returns.
}
+// addGSyscallNoP must be called when a goroutine in a syscall loses its P.
+// This function updates all relevant accounting.
+//
+// mp is the M attached to the _Gsyscall goroutine that is losing its P.
+//
+// Must be paired with a later decGSyscallNoP on the same M so that the
+// counter's increments and decrements stay balanced.
+//
+// nosplit because it's called on the syscall paths.
+//
+//go:nosplit
+func addGSyscallNoP(mp *m) {
+ // It's safe to read isExtraInC here because it's only mutated
+ // outside of _Gsyscall, and we know this thread is attached
+ // to a goroutine in _Gsyscall and blocked from exiting.
+ if !mp.isExtraInC {
+ // Increment nGsyscallNoP since we're taking away a P
+ // from a _Gsyscall goroutine, but only if isExtraInC
+ // is not set on the M. If it is, then this thread is
+ // back to being a full C thread, and will just inflate
+ // the count of not-in-go goroutines. See go.dev/issue/76435.
+ sched.nGsyscallNoP.Add(1)
+ }
+}
+
+// decGSyscallNoP must be called whenever a goroutine in a syscall without
+// a P exits the system call. This function updates all relevant accounting.
+//
+// mp is the M attached to the goroutine exiting the syscall. The isExtraInC
+// check below must mirror the one in addGSyscallNoP.
+//
+// nosplit because it's called from dropm.
+//
+//go:nosplit
+func decGSyscallNoP(mp *m) {
+ // Update nGsyscallNoP, but only if this is not a thread coming
+ // out of C. See the comment in addGSyscallNoP. This logic must match,
+ // to avoid unmatched increments and decrements.
+ if !mp.isExtraInC {
+ sched.nGsyscallNoP.Add(-1)
+ }
+}
+
// Called from syscall package before fork.
//
// syscall_runtime_BeforeFork is for package syscall,
trace.ProcSteal(s.pp)
traceRelease(trace)
}
- sched.nGsyscallNoP.Add(1)
+ addGSyscallNoP(s.mp)
s.pp.syscalltick++
}
--- /dev/null
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !plan9 && !windows
+// +build !plan9,!windows
+
+package main
+
+/*
+#include <stdatomic.h>
+#include <stddef.h>
+#include <pthread.h>
+
+extern void Ready();
+
+static int spinning;
+static int released;
+
+static void* enterGoThenSpinTwice(void* arg __attribute__ ((unused))) {
+ Ready();
+ atomic_fetch_add(&spinning, 1);
+ while(atomic_load(&released) == 0) {};
+
+ Ready();
+ atomic_fetch_add(&spinning, 1);
+ while(1) {};
+ return NULL;
+}
+
+static void SpinTwiceInNewCThread() {
+ pthread_t tid;
+ pthread_create(&tid, NULL, enterGoThenSpinTwice, NULL);
+}
+
+static int Spinning() {
+ return atomic_load(&spinning);
+}
+
+static void Release() {
+ atomic_store(&spinning, 0);
+ atomic_store(&released, 1);
+}
+*/
+import "C"
+
+import (
+ "os"
+ "runtime"
+ "runtime/metrics"
+)
+
+// Register the test entry point by name so the test harness can invoke it
+// as a subprocess command.
+func init() {
+ // NOTE(review): register is presumably provided by the shared testprog
+ // main package — confirm against the sibling files in this directory.
+ register("NotInGoMetricCallback", NotInGoMetricCallback)
+}
+
+// NotInGoMetricCallback checks that the /sched/goroutines/not-in-go:goroutines
+// metric returns to zero once C-created threads that previously called into Go
+// have gone back to running pure C code. See go.dev/issue/76435.
+//
+// It spins up N C threads that each call into Go (Ready) and then busy-wait
+// in C, exercises several accounting codepaths (STW via ReadMemStats), and
+// finally asserts the metric reads zero.
+func NotInGoMetricCallback() {
+ const N = 10
+ s := []metrics.Sample{{Name: "/sched/goroutines/not-in-go:goroutines"}}
+
+ // Create N new C threads that have called into Go at least once.
+ for range N {
+ C.SpinTwiceInNewCThread()
+ }
+
+ // Synchronize with spinning threads twice.
+ //
+ // This helps catch bad accounting by taking at least a couple other
+ // codepaths which would cause the accounting to change.
+ for i := range 2 {
+ // Make sure they pass through Go.
+ // N.B. Ready is called twice by the new threads.
+ for j := range N {
+ <-readyCh
+ if j == 2 {
+ // Try to trigger an update in the immediate STW handoff case.
+ runtime.ReadMemStats(&m)
+ }
+ }
+
+ // Make sure they're back in C.
+ for C.Spinning() < N {
+ }
+
+ // Do something that stops the world to take all the Ps back.
+ runtime.ReadMemStats(&m)
+
+ if i == 0 {
+ // Let the C threads out of their first busy-wait so they call
+ // back into Go (Ready) for the second round.
+ C.Release()
+ }
+ }
+
+ // Read not-in-go. All C threads are now spinning in pure C, so the
+ // metric must have settled back to zero.
+ metrics.Read(s)
+ if n := s[0].Value.Uint64(); n != 0 {
+ println("expected 0 not-in-go goroutines, found", n)
+ os.Exit(2)
+ }
+ println("OK")
+}
+
+// m is scratch space reused by the runtime.ReadMemStats calls in this file.
+var m runtime.MemStats
+
+// readyCh is signaled once per Ready call, i.e. each time a C-created
+// thread calls into Go.
+var readyCh = make(chan bool)
+
+// Ready is called from C (see enterGoThenSpinTwice in the preamble) to
+// signal that a C thread has entered Go.
+//export Ready
+func Ready() {
+ readyCh <- true
+}