From: Michael Pratt
Date: Wed, 20 Jul 2022 21:39:12 +0000 (-0400)
Subject: runtime: convert schedt.lastpoll to atomic type
X-Git-Tag: go1.20rc1~1670
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=5d7d50111f123011723989a7d123d6f0a6df38e6;p=gostls13.git

runtime: convert schedt.lastpoll to atomic type

Note that this changes the type from uint64 to int64, the type used by
nanotime(). It also adds an atomic load in pollWork(), which previously
used a non-atomic load.

For #53821.

Change-Id: I6173c90f20bfdc0e0a4bc3a7b1c798d1c429fff5
Reviewed-on: https://go-review.googlesource.com/c/go/+/419442
Run-TryBot: Michael Pratt
Reviewed-by: Austin Clements
TryBot-Result: Gopher Robot
---

diff --git a/src/runtime/align_runtime_test.go b/src/runtime/align_runtime_test.go
index 18a3908fda..c3b9c1712c 100644
--- a/src/runtime/align_runtime_test.go
+++ b/src/runtime/align_runtime_test.go
@@ -17,7 +17,6 @@ var AtomicFields = []uintptr{
 	unsafe.Offsetof(p{}.timer0When),
 	unsafe.Offsetof(p{}.timerModifiedEarliest),
 	unsafe.Offsetof(p{}.gcFractionalMarkTime),
-	unsafe.Offsetof(schedt{}.lastpoll),
 	unsafe.Offsetof(schedt{}.pollUntil),
 	unsafe.Offsetof(schedt{}.timeToRun),
 	unsafe.Offsetof(timeHistogram{}.underflow),
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 2ebca7b267..fd9c1daf43 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -714,7 +714,7 @@ func schedinit() {
 	gcinit()
 
 	lock(&sched.lock)
-	sched.lastpoll = uint64(nanotime())
+	sched.lastpoll.Store(nanotime())
 	procs := ncpu
 	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
 		procs = n
@@ -2390,7 +2390,7 @@ func handoffp(pp *p) {
 	}
 	// If this is the last running P and nobody is polling network,
 	// need to wakeup another M to poll network.
-	if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
+	if sched.npidle == uint32(gomaxprocs-1) && sched.lastpoll.Load() != 0 {
 		unlock(&sched.lock)
 		startm(pp, false)
 		return
@@ -2632,7 +2632,7 @@ top:
 	// blocked thread (e.g. it has already returned from netpoll, but does
 	// not set lastpoll yet), this thread will do blocking netpoll below
 	// anyway.
-	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
+	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll.Load() != 0 {
 		if list := netpoll(0); !list.empty() { // non-blocking
 			gp := list.pop()
 			injectglist(&list)
@@ -2803,7 +2803,7 @@ top:
 	}
 	// Poll network until next timer.
-	if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
+	if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
 		atomic.Store64(&sched.pollUntil, uint64(pollUntil))
 		if mp.p != 0 {
 			throw("findrunnable: netpoll with p")
 		}
@@ -2826,7 +2826,7 @@
 		}
 		list := netpoll(delay) // block until new work is available
 		atomic.Store64(&sched.pollUntil, 0)
-		atomic.Store64(&sched.lastpoll, uint64(now))
+		sched.lastpoll.Store(now)
 		if faketime != 0 && list.empty() {
 			// Using fake time and nothing is ready; stop M.
 			// When all M's stop, checkdead will call timejump.
@@ -2877,7 +2877,7 @@ func pollWork() bool {
 	if !runqempty(p) {
 		return true
 	}
-	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
+	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll.Load() != 0 {
 		if list := netpoll(0); !list.empty() {
 			injectglist(&list)
 			return true
@@ -3066,7 +3066,7 @@ func checkIdleGCNoP() (*p, *g) {
 // going to wake up before the when argument; or it wakes an idle P to service
 // timers and the network poller if there isn't one already.
 func wakeNetPoller(when int64) {
-	if atomic.Load64(&sched.lastpoll) == 0 {
+	if sched.lastpoll.Load() == 0 {
 		// In findrunnable we ensure that when polling the pollUntil
 		// field is either zero or the time to which the current
 		// poll is expected to run. This can have a spurious wakeup
@@ -5200,9 +5200,9 @@ func sysmon() {
 			asmcgocall(*cgo_yield, nil)
 		}
 		// poll network if not polled for more than 10ms
-		lastpoll := int64(atomic.Load64(&sched.lastpoll))
+		lastpoll := sched.lastpoll.Load()
 		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
-			atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
+			sched.lastpoll.CompareAndSwap(lastpoll, now)
 			list := netpoll(0) // non-blocking - returns list of goroutines
 			if !list.empty() {
 				// Need to decrement number of idle locked M's
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 78a089ec9d..1d678883f0 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -760,7 +760,7 @@ type p struct {
 type schedt struct {
 	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
 	goidgen   atomic.Uint64
-	lastpoll  uint64 // time of last network poll, 0 if currently polling
+	lastpoll  atomic.Int64 // time of last network poll, 0 if currently polling
 	pollUntil uint64 // time to which current poll is sleeping
 
 	lock mutex
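
The runtime-internal atomic.Int64 adopted above mirrors the exported
sync/atomic.Int64 API, so the new call patterns (Store, Load, Swap,
CompareAndSwap) can be sketched outside the runtime. A minimal
standalone sketch follows; the lastpoll variable and the nanotime
stand-in are illustrative stand-ins for the runtime's internals, not
its actual code:

	package main

	import (
		"fmt"
		"sync/atomic"
		"time"
	)

	// lastpoll mimics the schedt field converted above: an atomic.Int64
	// holding a nanotime-style timestamp, 0 meaning "currently polling".
	var lastpoll atomic.Int64

	// nanotime stands in for the runtime's nanotime(), which also returns
	// int64 (the motivation for switching the field away from uint64).
	func nanotime() int64 { return time.Now().UnixNano() }

	func main() {
		// schedinit: record the initial poll time. No uint64 conversion
		// is needed now that the field and nanotime() share a type.
		lastpoll.Store(nanotime())

		// findrunnable: claim the poller role. Swap(0) returns the old
		// value, so exactly one thread observes non-zero and proceeds to
		// the blocking poll, restoring a timestamp when the poll returns.
		if old := lastpoll.Swap(0); old != 0 {
			// ... blocking netpoll would run here ...
			lastpoll.Store(nanotime())
		}

		// sysmon: poll only if the last poll is more than 10ms old, using
		// CompareAndSwap so the update is dropped if another thread has
		// changed lastpoll since the Load.
		last := lastpoll.Load()
		if last != 0 && last+10*1000*1000 < nanotime() {
			lastpoll.CompareAndSwap(last, nanotime())
		}

		fmt.Println("lastpoll:", lastpoll.Load())
	}

The method-based type also explains the align_runtime_test.go removal:
atomic.Int64 guarantees its own 64-bit alignment, so lastpoll no longer
needs to appear in the hand-maintained AtomicFields alignment list.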