// If any procs are sleeping on addr, wake up at most cnt.
const (
- mutex_unlocked = 0
- mutex_locked = 1
- mutex_sleeping = 2
+ mutex_locked = 0x1
+ mutex_sleeping = 0x2 // Ensure futex's low 32 bits won't be all zeros
active_spin = 4
active_spin_cnt = 30
passive_spin = 1
)
-// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
-// mutex_sleeping means that there is presumably at least one sleeping thread.
-// Note that there can be spinning threads during all states - they do not
-// affect mutex's state.
+// The mutex.key holds two state flags in its lowest bits: When the mutex_locked
+// bit is set, the mutex is locked. When the mutex_sleeping bit is set, a thread
+// is waiting in futexsleep for the mutex to be available. These flags operate
+// independently: a thread can enter lock2, observe that another thread is
+// already asleep, and immediately try to grab the lock anyway without waiting
+// for its "fair" turn.
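+//
+// For example (illustrative of the semantics above): a key value of
+// mutex_locked|mutex_sleeping (0x3) means the mutex is held and at least one
+// M is parked in futexsleep, while mutex_sleeping alone (0x2) means the mutex
+// is free but the sleeping bit from an earlier waiter is still set.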
// We use the uintptr mutex.key and note.key as a uint32.
//
func lock2(l *mutex) {
gp := getg()
-
if gp.m.locks < 0 {
throw("runtime·lock: lock count")
}
gp.m.locks++
// Speculative grab for lock.
- v := atomic.Xchg(key32(&l.key), mutex_locked)
- if v == mutex_unlocked {
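+	// This CAS succeeds only when the key is completely zero, i.e. the mutex
+	// is unlocked and no M has set the sleeping bit.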
+ if atomic.Casuintptr(&l.key, 0, mutex_locked) {
return
}
- // wait is either MUTEX_LOCKED or MUTEX_SLEEPING
- // depending on whether there is a thread sleeping
- // on this mutex. If we ever change l->key from
- // MUTEX_SLEEPING to some other value, we must be
- // careful to change it back to MUTEX_SLEEPING before
- // returning, to ensure that the sleeping thread gets
- // its wakeup call.
- wait := v
-
timer := &lockTimer{lock: l}
timer.begin()
	// On uniprocessors, no point spinning.
	spin := 0
	if ncpu > 1 {
spin = active_spin
}
- for {
- // Try for lock, spinning.
- for i := 0; i < spin; i++ {
- for l.key == mutex_unlocked {
- if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
- timer.end()
- return
- }
+Loop:
+ for i := 0; ; i++ {
+ v := atomic.Loaduintptr(&l.key)
+ if v&mutex_locked == 0 {
+ // Unlocked. Try to lock.
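+			// The CAS keeps the key's other bits (notably mutex_sleeping)
+			// intact, so a parked waiter still gets its futexwakeup when this
+			// M eventually unlocks.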
+ if atomic.Casuintptr(&l.key, v, v|mutex_locked) {
+ timer.end()
+ return
}
- procyield(active_spin_cnt)
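+			// The CAS failed: another thread took the lock first. Reset i so
+			// we start a fresh round of spinning before escalating to sleep.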
+ i = 0
}
-
- // Try for lock, rescheduling.
- for i := 0; i < passive_spin; i++ {
- for l.key == mutex_unlocked {
- if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
- timer.end()
- return
+ if i < spin {
+ procyield(active_spin_cnt)
+ } else if i < spin+passive_spin {
+ osyield()
+ } else {
+ // Someone else has it.
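+			// Announce that we are about to sleep by setting mutex_sleeping
+			// (preserving the key's other bits), so the eventual unlock knows
+			// to call futexwakeup.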
+ for {
+ head := v &^ (mutex_locked | mutex_sleeping)
+ if atomic.Casuintptr(&l.key, v, head|mutex_locked|mutex_sleeping) {
+ break
+ }
+ v = atomic.Loaduintptr(&l.key)
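+				// The lock was released while we were trying to set the
+				// sleeping bit; go back to the top and try to grab it instead
+				// of sleeping.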
+ if v&mutex_locked == 0 {
+ continue Loop
}
}
- osyield()
- }
-
- // Sleep.
- v = atomic.Xchg(key32(&l.key), mutex_sleeping)
- if v == mutex_unlocked {
- timer.end()
- return
+ if v&mutex_locked != 0 {
+ // Queued. Wait.
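+				// futexsleep only blocks while the key's low 32 bits still
+				// equal v, so an unlock that slipped in since v was loaded is
+				// not missed.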
+ futexsleep(key32(&l.key), uint32(v), -1)
+ i = 0
+ }
}
- wait = mutex_sleeping
- futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

func unlock2(l *mutex) {
- v := atomic.Xchg(key32(&l.key), mutex_unlocked)
- if v == mutex_unlocked {
- throw("unlock of unlocked lock")
- }
- if v == mutex_sleeping {
- futexwakeup(key32(&l.key), 1)
+ for {
+ v := atomic.Loaduintptr(&l.key)
+ if v == mutex_locked {
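+			// Only the locked bit is set: nobody is sleeping, so release the
+			// mutex by clearing the key back to zero; no wakeup is needed.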
+ if atomic.Casuintptr(&l.key, mutex_locked, 0) {
+ break
+ }
+ } else if v&mutex_locked == 0 {
+ throw("unlock of unlocked lock")
+ } else {
+ // Other M's are waiting for the lock.
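+			// Drop the locked bit but leave mutex_sleeping set, then wake one
+			// waiter. The woken M re-enters the lock2 loop and competes for
+			// the lock like any other thread.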
+ if atomic.Casuintptr(&l.key, v, v&^mutex_locked) {
+ futexwakeup(key32(&l.key), 1)
+ break
+ }
+ }
}
gp := getg()