"errors"
"io"
"os"
- "runtime"
"sync"
"sync/atomic"
"syscall"
full = pq.full
}
- // Before we check dt.expired, yield to other goroutines.
- // This may help to prevent starvation of the goroutine that runs the
- // deadlineTimer's time.After callback.
- //
- // TODO(#65178): Remove this when the runtime scheduler no longer starves
- // runnable goroutines.
- runtime.Gosched()
-
select {
case <-dt.expired:
return 0, os.ErrDeadlineExceeded
empty = pq.empty
}
- // Before we check dt.expired, yield to other goroutines.
- // This may help to prevent starvation of the goroutine that runs the
- // deadlineTimer's time.After callback.
- //
- // TODO(#65178): Remove this when the runtime scheduler no longer starves
- // runnable goroutines.
- runtime.Gosched()
-
select {
case <-dt.expired:
return 0, nil, os.ErrDeadlineExceeded
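These two hunks remove the same workaround: a runtime.Gosched() before each dt.expired check, added so the goroutine running the deadline timer's callback would not be starved. With runnext disabled on platforms that lack sysmon (the proc.go change below), the yield is no longer needed. For orientation, a hypothetical sketch of the kind of deadline timer these selects consult, assuming the expired channel is closed from a time.AfterFunc callback; the names are illustrative, not the actual net_fake.go types:

// Hypothetical sketch, not the actual net_fake.go implementation.
package sketch

import "time"

type deadlineTimer struct {
	expired chan struct{} // closed once the deadline passes
}

func newDeadlineTimer(deadline time.Time) *deadlineTimer {
	dt := &deadlineTimer{expired: make(chan struct{})}
	// time.AfterFunc runs its callback on a separate goroutine; if the
	// scheduler never runs that goroutine, dt.expired is never closed and
	// blocked readers and writers never see os.ErrDeadlineExceeded.
	time.AfterFunc(time.Until(deadline), func() { close(dt.expired) })
	return dt
}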
// Allow newproc to start new Ms.
mainStarted = true
- if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
+ if haveSysmon {
systemstack(func() {
newm(sysmon, nil, -1)
})
// golang.org/issue/42515 is needed on NetBSD.
var needSysmonWorkaround bool = false
+// haveSysmon indicates whether there is sysmon thread support.
+//
+// No threads on wasm yet, so no sysmon.
+const haveSysmon = GOARCH != "wasm"
+
// Always runs without a P, so write barriers are not allowed.
//
//go:nowritebarrierrec
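Because GOARCH is a constant in the runtime, haveSysmon is a compile-time constant and the branch it guards compiles away on wasm. A minimal sketch of the same gating pattern outside the runtime; haveMonitor and monitor are made-up names, not part of this CL:

// Sketch of gating a background thread on a compile-time constant.
package sketch

import "runtime"

// runtime.GOARCH is a constant, so this comparison is evaluated at
// compile time and the dead branch in start is eliminated on wasm.
const haveMonitor = runtime.GOARCH != "wasm"

func start() {
	if haveMonitor {
		go monitor()
	}
}

func monitor() {
	// Background monitoring loop would go here.
}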
s := pp.status
sysretake := false
if s == _Prunning || s == _Psyscall {
- // Preempt G if it's running for too long.
+ // Preempt G if it's running on the same schedtick for
+ // too long. This could be from a single long-running
+ // goroutine or a sequence of goroutines run via
+ // runnext, which share a single schedtick time slice.
t := int64(pp.schedtick)
if int64(pd.schedtick) != t {
pd.schedtick = uint32(t)
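The expanded comment is the crux for runnext: a goroutine scheduled via runnext inherits the current time slice, so schedtick does not advance and this check treats the whole chain as one long-running goroutine. A simplified sketch of the check, using stand-in names rather than the real runtime types:

// Simplified sketch of the per-P check above; pSnapshot, shouldPreempt,
// and forcePreemptNS are stand-in names, not runtime API.
package sketch

const forcePreemptNS = 10 * 1000 * 1000 // preempt after 10ms on one tick

type pSnapshot struct {
	schedtick uint32 // schedtick observed on the previous sysmon pass
	schedwhen int64  // when that schedtick was first observed
}

func shouldPreempt(snap *pSnapshot, schedtick uint32, now int64) bool {
	if snap.schedtick != schedtick {
		// The P started a new time slice since the last pass; remember it.
		snap.schedtick = schedtick
		snap.schedwhen = now
		return false
	}
	// Still on the same time slice (one goroutine, or a runnext chain):
	// preempt once it has held the P for forcePreemptNS.
	return snap.schedwhen+forcePreemptNS <= now
}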
// If the run queue is full, runnext puts g on the global queue.
// Executed only by the owner P.
func runqput(pp *p, gp *g, next bool) {
+ if !haveSysmon && next {
+ // A runnext goroutine shares the same time slice as the
+ // current goroutine (inheritTime from runqget). To prevent a
+ // ping-pong pair of goroutines from starving all others, we
+ // depend on sysmon to preempt "long-running goroutines". That
+ // is, any set of goroutines sharing the same time slice.
+ //
+ // If there is no sysmon, we must avoid runnext entirely or
+ // risk starvation.
+ next = false
+ }
if randomizeScheduler && next && randn(2) == 0 {
next = false
}
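The new comment describes the failure mode behind go.dev/issue/65178. A minimal standalone illustration of such a ping-pong pair, not part of this CL: on GOMAXPROCS(1), each channel operation readies the blocked partner into runnext with an inherited time slice, so without sysmon preemption the timer callback goroutine below may never run on wasm.

// Minimal illustration of a runnext ping-pong pair; not part of this CL.
package main

import (
	"runtime"
	"sync/atomic"
	"time"
)

func main() {
	runtime.GOMAXPROCS(1)

	var stop atomic.Bool
	ball := make(chan struct{})

	// Ping-pong pair: each channel operation readies the blocked partner,
	// which is placed in runnext and inherits the current time slice.
	go func() {
		for !stop.Load() {
			ball <- struct{}{}
		}
		close(ball)
	}()
	go func() {
		for range ball {
		}
	}()

	// Victim: the timer's callback goroutine becomes runnable while the
	// pair is rallying. If runnext keeps the pair on the only P and
	// nothing preempts them, stop is never set and main never returns.
	done := make(chan struct{})
	time.AfterFunc(time.Millisecond, func() {
		stop.Store(true)
		close(done)
	})
	<-done
}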
// the AfterFunc goroutine instead of the runnable channel goroutine.
// However, in https://go.dev/issue/65178 this was observed to live-lock
// on wasip1/wasm and js/wasm after <10000 runs.
-
- if runtime.GOARCH == "wasm" {
- testenv.SkipFlaky(t, 65178)
- }
-
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
var (