import (
"internal/goarch"
- "runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
// We must still stay on the same m.
defer unlockOSThread()
- if gp.m.needextram || atomic.Load(&extraMWaiters) > 0 {
+ if gp.m.needextram || extraMWaiters.Load() > 0 {
gp.m.needextram = false
systemstack(newextram)
}
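// A minimal standalone sketch (not part of the patch): the same Load-gated
// check, written against the public sync/atomic package, whose Uint32 type
// mirrors runtime/internal/atomic.Uint32. The names waiters, need, and
// spawnExtra are hypothetical stand-ins for extraMWaiters, needextram, and
// newextram.
package sketch

import "sync/atomic"

var waiters atomic.Uint32

func maybeSpawn(need *bool, spawnExtra func()) {
	// One atomic read replaces the old atomic.Load(&extraMWaiters) call;
	// the semantics are unchanged, only the spelling is.
	if *need || waiters.Load() > 0 {
		*need = false
		spawnExtra()
	}
}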
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
- c := atomic.Xchg(&extraMWaiters, 0)
+ c := extraMWaiters.Swap(0)
if c > 0 {
for i := uint32(0); i < c; i++ {
oneNewExtraM()
return uintptr(unsafe.Pointer(getg().m))
}
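// A minimal sketch (not part of the patch) of the drain step, assuming the
// public sync/atomic package: Swap(0) returns the old waiter count and zeroes
// the counter in a single atomic step, so a waiter that registers between the
// read and the clear cannot be lost. drainWaiters and serveOne are
// hypothetical names.
package sketch

import "sync/atomic"

var waiters atomic.Uint32

func drainWaiters(serveOne func()) {
	c := waiters.Swap(0) // replaces atomic.Xchg(&extraMWaiters, 0)
	for i := uint32(0); i < c; i++ {
		serveOne()
	}
}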
-var extram uintptr
+var extram atomic.Uintptr
var extraMCount uint32 // Protected by lockextra
-var extraMWaiters uint32
+var extraMWaiters atomic.Uint32
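// A minimal sketch (not part of the patch) of what the type change buys,
// shown with the public sync/atomic types, which mirror the runtime-internal
// ones: a bare uintptr guarded by atomic free functions still permits a stray
// plain write at any use site, while an atomic.Uintptr can only be touched
// through its methods, so the compiler enforces the access discipline.
package sketch

import "sync/atomic"

// Before: var head uintptr, plus atomic.Loaduintptr / Storeuintptr at every
// use site, with nothing preventing an unsynchronized head = 0. After:
var head atomic.Uintptr

func reset() {
	head.Store(0) // the only way in; a plain head = 0 no longer compiles
}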
// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head to extram.
incr := false
for {
- old := atomic.Loaduintptr(&extram)
+ old := extram.Load()
if old == locked {
osyield_no_g()
continue
// Add 1 to the number of threads
// waiting for an M.
// This is cleared by newextram.
- atomic.Xadd(&extraMWaiters, 1)
+ extraMWaiters.Add(1)
incr = true
}
usleep_no_g(1)
continue
}
- if atomic.Casuintptr(&extram, old, locked) {
+ if extram.CompareAndSwap(old, locked) {
return (*m)(unsafe.Pointer(old))
}
osyield_no_g()
//go:nosplit
func unlockextra(mp *m) {
- atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
+ extram.Store(uintptr(unsafe.Pointer(mp)))
}
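// A minimal sketch (not part of the patch): lockextra and unlockextra form a
// spinlock over a single pointer-sized word, with the sentinel value 1
// meaning "locked". Rebuilt on the public sync/atomic package; the runtime
// spins with the _no_g primitives (osyield_no_g, usleep_no_g) because it may
// run without a g, whereas this sketch uses runtime.Gosched. node, lockList,
// and unlockList are hypothetical names for the extra-M list machinery.
package sketch

import (
	"runtime"
	"sync/atomic"
	"unsafe"
)

type node struct{ next *node }

const locked = 1 // sentinel stored in the head word while the list is held

var head atomic.Uintptr

func lockList() *node {
	for {
		old := head.Load()
		if old == locked {
			runtime.Gosched() // someone else holds the list; back off
			continue
		}
		// Claim the list by swinging the head word to the sentinel;
		// CompareAndSwap replaces the old atomic.Casuintptr call.
		if head.CompareAndSwap(old, locked) {
			return (*node)(unsafe.Pointer(old))
		}
		runtime.Gosched()
	}
}

func unlockList(n *node) {
	// Storing the new head both installs it and releases the lock.
	head.Store(uintptr(unsafe.Pointer(n)))
}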
var (