From: Rhys Hiltner
Date: Tue, 14 May 2024 19:37:14 +0000 (-0700)
Subject: runtime: use semaphore structure for futex locks
X-Git-Tag: go1.23rc1~251
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=dfb7073bb8e66630156fc14ae50042acef89a929;p=gostls13.git

runtime: use semaphore structure for futex locks

Prepare the futex-based implementation of lock2 to maintain a list of
waiting Ms. Beyond storing an muintptr in the mutex's key field, we now
must never overwrite that field (even for a moment) without taking its
current value into account.

The semaphore-based implementation of lock2 already has that behavior.
Reuse that structure.

For #66999

Change-Id: I23b6f6bacb276fe33c6aed5c0571161a7e71fe6c
Reviewed-on: https://go-review.googlesource.com/c/go/+/585636
Reviewed-by: Dmitri Shuralyov
Auto-Submit: Rhys Hiltner
Reviewed-by: Michael Pratt
LUCI-TryBot-Result: Go LUCI
---

diff --git a/src/runtime/lock_futex.go b/src/runtime/lock_futex.go
index 58690e45e4..2d00635ba7 100644
--- a/src/runtime/lock_futex.go
+++ b/src/runtime/lock_futex.go
@@ -23,19 +23,20 @@ import (
 // If any procs are sleeping on addr, wake up at most cnt.
 
 const (
-	mutex_unlocked = 0
-	mutex_locked   = 1
-	mutex_sleeping = 2
+	mutex_locked   = 0x1
+	mutex_sleeping = 0x2 // Ensure futex's low 32 bits won't be all zeros
 
 	active_spin     = 4
 	active_spin_cnt = 30
 	passive_spin    = 1
 )
 
-// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
-// mutex_sleeping means that there is presumably at least one sleeping thread.
-// Note that there can be spinning threads during all states - they do not
-// affect mutex's state.
+// The mutex.key holds two state flags in its lowest bits: When the mutex_locked
+// bit is set, the mutex is locked. When the mutex_sleeping bit is set, a thread
+// is waiting in futexsleep for the mutex to be available. These flags operate
+// independently: a thread can enter lock2, observe that another thread is
+// already asleep, and immediately try to grab the lock anyway without waiting
+// for its "fair" turn.
 
 // We use the uintptr mutex.key and note.key as a uint32.
 //
@@ -54,27 +55,16 @@ func lock(l *mutex) {
 
 func lock2(l *mutex) {
 	gp := getg()
-
 	if gp.m.locks < 0 {
 		throw("runtime·lock: lock count")
 	}
 	gp.m.locks++
 
 	// Speculative grab for lock.
-	v := atomic.Xchg(key32(&l.key), mutex_locked)
-	if v == mutex_unlocked {
+	if atomic.Casuintptr(&l.key, 0, mutex_locked) {
 		return
 	}
 
-	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
-	// depending on whether there is a thread sleeping
-	// on this mutex. If we ever change l->key from
-	// MUTEX_SLEEPING to some other value, we must be
-	// careful to change it back to MUTEX_SLEEPING before
-	// returning, to ensure that the sleeping thread gets
-	// its wakeup call.
-	wait := v
-
 	timer := &lockTimer{lock: l}
 	timer.begin()
 	// On uniprocessors, no point spinning.
@@ -83,37 +73,39 @@ func lock2(l *mutex) {
 	if ncpu > 1 {
 		spin = active_spin
 	}
-	for {
-		// Try for lock, spinning.
-		for i := 0; i < spin; i++ {
-			for l.key == mutex_unlocked {
-				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
-					timer.end()
-					return
-				}
+Loop:
+	for i := 0; ; i++ {
+		v := atomic.Loaduintptr(&l.key)
+		if v&mutex_locked == 0 {
+			// Unlocked. Try to lock.
+			if atomic.Casuintptr(&l.key, v, v|mutex_locked) {
+				timer.end()
+				return
 			}
-			procyield(active_spin_cnt)
+			i = 0
 		}
-
-		// Try for lock, rescheduling.
-		for i := 0; i < passive_spin; i++ {
-			for l.key == mutex_unlocked {
-				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
-					timer.end()
-					return
+		if i < spin {
+			procyield(active_spin_cnt)
+		} else if i < spin+passive_spin {
+			osyield()
+		} else {
+			// Someone else has it.
+			for {
+				head := v &^ (mutex_locked | mutex_sleeping)
+				if atomic.Casuintptr(&l.key, v, head|mutex_locked|mutex_sleeping) {
+					break
+				}
+				v = atomic.Loaduintptr(&l.key)
+				if v&mutex_locked == 0 {
+					continue Loop
 				}
 			}
-			osyield()
-		}
-
-		// Sleep.
-		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
-		if v == mutex_unlocked {
-			timer.end()
-			return
+			if v&mutex_locked != 0 {
+				// Queued. Wait.
+				futexsleep(key32(&l.key), uint32(v), -1)
+				i = 0
+			}
 		}
-		wait = mutex_sleeping
-		futexsleep(key32(&l.key), mutex_sleeping, -1)
 	}
 }
 
@@ -122,12 +114,21 @@ func unlock(l *mutex) {
 }
 
 func unlock2(l *mutex) {
-	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
-	if v == mutex_unlocked {
-		throw("unlock of unlocked lock")
-	}
-	if v == mutex_sleeping {
-		futexwakeup(key32(&l.key), 1)
+	for {
+		v := atomic.Loaduintptr(&l.key)
+		if v == mutex_locked {
+			if atomic.Casuintptr(&l.key, mutex_locked, 0) {
+				break
+			}
+		} else if v&mutex_locked == 0 {
+			throw("unlock of unlocked lock")
+		} else {
+			// Other M's are waiting for the lock.
+			if atomic.Casuintptr(&l.key, v, v&^mutex_locked) {
+				futexwakeup(key32(&l.key), 1)
+				break
+			}
+		}
 	}
 
 	gp := getg()
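To make the new locking discipline concrete outside the runtime, here is a minimal user-space sketch of the same idea: every transition of the key is a CAS against its observed value, so nothing stored alongside the flag bits (eventually, the list of waiting Ms) can be clobbered. This is an illustration, not the runtime's code: it assumes sync/atomic's Uintptr type, substitutes runtime.Gosched for futexsleep/futexwakeup, and the sketchMutex name and layout are hypothetical.

// futex_lock_sketch.go -- illustrative only; not the Go runtime's implementation.
package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

const (
	mutexLocked   = 0x1 // low bit: the lock is held
	mutexSleeping = 0x2 // next bit: some waiter has (notionally) gone to sleep
)

// sketchMutex models the key word: flag bits in the low bits, with the
// upper bits reserved (in the runtime, eventually a pointer to waiting Ms).
type sketchMutex struct {
	key atomic.Uintptr
}

func (m *sketchMutex) lock() {
	// Speculative grab, like lock2's fast path.
	if m.key.CompareAndSwap(0, mutexLocked) {
		return
	}
	for {
		v := m.key.Load()
		if v&mutexLocked == 0 {
			// Unlocked: set only the locked bit, preserving everything else.
			if m.key.CompareAndSwap(v, v|mutexLocked) {
				return
			}
			continue
		}
		// Contended: record a waiter, again preserving the other bits.
		// The runtime would futexsleep on the key's low 32 bits here;
		// the sketch just yields and retries.
		m.key.CompareAndSwap(v, v|mutexSleeping)
		runtime.Gosched()
	}
}

func (m *sketchMutex) unlock() {
	for {
		v := m.key.Load()
		if v&mutexLocked == 0 {
			panic("unlock of unlocked lock")
		}
		if v == mutexLocked {
			// Nothing else stored in the key: drop straight back to zero.
			if m.key.CompareAndSwap(mutexLocked, 0) {
				return
			}
			continue
		}
		// Waiters recorded: clear only the locked bit so the rest of the
		// word survives. The runtime would futexwakeup one waiter here.
		if m.key.CompareAndSwap(v, v&^mutexLocked) {
			return
		}
	}
}

func main() {
	var m sketchMutex
	var wg sync.WaitGroup
	counter := 0
	for g := 0; g < 8; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				m.lock()
				counter++
				m.unlock()
			}
		}()
	}
	wg.Wait()
	fmt.Println("counter:", counter) // expect 8000
}

The property mirrored from the CL is that unlock never writes the key blindly (the old code's Xchg to mutex_unlocked did exactly that); it only clears the locked bit via CAS, so state kept in the same word cannot be lost.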