 //	futexwakeup(addr *uint32, cnt uint32)
 //		If any procs are sleeping on addr, wake up at most cnt.
 const (
-    mutex_locked   = 0x1
-    mutex_sleeping = 0x2 // Ensure futex's low 32 bits won't be all zeros
+    mutex_unlocked = 0
+    mutex_locked   = 1
+    mutex_sleeping = 2
     active_spin     = 4
     active_spin_cnt = 30
     passive_spin    = 1
 )
-// The mutex.key holds two state flags in its lowest bits: When the mutex_locked
-// bit is set, the mutex is locked. When the mutex_sleeping bit is set, a thread
-// is waiting in futexsleep for the mutex to be available. These flags operate
-// independently: a thread can enter lock2, observe that another thread is
-// already asleep, and immediately try to grab the lock anyway without waiting
-// for its "fair" turn.
+// Possible lock states are mutex_unlocked, mutex_locked, and mutex_sleeping.
+// mutex_sleeping means that there is presumably at least one sleeping thread.
+// Note that there can be spinning threads in all states; they do not
+// affect the mutex's state.
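+// Informal transition summary:
+//
+//	lock:   any state -> mutex_locked or mutex_sleeping; mutex_sleeping
+//	        is sticky, since waiters that observe it keep restoring it.
+//	unlock: any state -> mutex_unlocked, waking one thread if the
+//	        previous state was mutex_sleeping.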
// We use the uintptr mutex.key and note.key as a uint32.
//
 func lock2(l *mutex) {
     gp := getg()
+
     if gp.m.locks < 0 {
         throw("runtime·lock: lock count")
     }
     gp.m.locks++
     // Speculative grab for lock.
-    if atomic.Casuintptr(&l.key, 0, mutex_locked) {
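+    // Xchg rather than Cas: one unconditional atomic whose old value both
+    // signals success (mutex_unlocked) and, on failure, records whether a
+    // sleeper may already exist.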
+    v := atomic.Xchg(key32(&l.key), mutex_locked)
+    if v == mutex_unlocked {
         return
     }
+
+    // wait is either mutex_locked or mutex_sleeping
+    // depending on whether there is a thread sleeping
+    // on this mutex. If we ever change l.key from
+    // mutex_sleeping to some other value, we must be
+    // careful to change it back to mutex_sleeping before
+    // returning, to ensure that the sleeping thread gets
+    // its wakeup call.
+    wait := v
+
     timer := &lockTimer{lock: l}
     timer.begin()
     // On uniprocessors, no point spinning.
     // On multiprocessors, spin for active_spin attempts.
     spin := 0
     if ncpu > 1 {
         spin = active_spin
     }
-Loop:
-    for i := 0; ; i++ {
-        v := atomic.Loaduintptr(&l.key)
-        if v&mutex_locked == 0 {
-            // Unlocked. Try to lock.
-            if atomic.Casuintptr(&l.key, v, v|mutex_locked) {
-                timer.end()
-                return
+    for {
+        // Try for lock, spinning.
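+        // procyield spins in user mode (e.g. PAUSE on x86) without giving
+        // up the OS thread; cheap if the holder is about to release.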
+        for i := 0; i < spin; i++ {
+            for l.key == mutex_unlocked {
+                if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
+                    timer.end()
+                    return
+                }
             }
-            i = 0
-        }
-        if i < spin {
             procyield(active_spin_cnt)
-        } else if i < spin+passive_spin {
-            osyield()
-        } else {
-            // Someone else has it.
-            for {
-                head := v &^ (mutex_locked | mutex_sleeping)
-                if atomic.Casuintptr(&l.key, v, head|mutex_locked|mutex_sleeping) {
-                    break
-                }
-                v = atomic.Loaduintptr(&l.key)
-                if v&mutex_locked == 0 {
-                    continue Loop
+        }
+
+        // Try for lock, rescheduling.
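+        // osyield yields the OS thread's time slice, giving a preempted
+        // lock holder a chance to run and release.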
+        for i := 0; i < passive_spin; i++ {
+            for l.key == mutex_unlocked {
+                if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
+                    timer.end()
+                    return
                 }
             }
-        if v&mutex_locked != 0 {
-            // Queued. Wait.
-            futexsleep(key32(&l.key), uint32(v), -1)
-            i = 0
-        }
+            osyield()
         }
+
+        // Sleep.
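+        // Swap in mutex_sleeping before sleeping. If the swap reveals the
+        // lock was free, we own it; the key is left as mutex_sleeping, which
+        // at worst costs one spurious futexwakeup at the next unlock.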
+        v = atomic.Xchg(key32(&l.key), mutex_sleeping)
+        if v == mutex_unlocked {
+            timer.end()
+            return
+        }
+        wait = mutex_sleeping
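+        // futexsleep blocks only if l.key still equals mutex_sleeping, so a
+        // wakeup delivered between the Xchg above and this call is not lost.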
+        futexsleep(key32(&l.key), mutex_sleeping, -1)
     }
 }
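
For illustration, here is a self-contained user-space sketch of the same
three-state algorithm. This is not runtime code: futexMutex and its
channel-based wake field are hypothetical stand-ins for the kernel futex,
sync/atomic's SwapUint32/CompareAndSwapUint32 play the role of the runtime's
Xchg/Cas, and runtime.Gosched replaces procyield/osyield. The spin counts are
trimmed, and a buffered channel only approximates futexsleep/futexwakeup (a
real futex atomically re-checks the word before sleeping), so treat it as an
illustration of the state machine, not a usable lock.

package main

import (
    "fmt"
    "runtime"
    "sync"
    "sync/atomic"
)

const (
    mutexUnlocked = 0
    mutexLocked   = 1
    mutexSleeping = 2
)

type futexMutex struct {
    key  uint32
    wake chan struct{} // stand-in for the kernel futex wait queue
}

func newFutexMutex() *futexMutex {
    return &futexMutex{wake: make(chan struct{}, 1)}
}

func (m *futexMutex) lock() {
    // Speculative grab, as in lock2: one swap, old value seeds wait.
    v := atomic.SwapUint32(&m.key, mutexLocked)
    if v == mutexUnlocked {
        return
    }
    wait := v // preserves "a sleeper may exist" across CAS attempts
    for {
        // Spin phase; Gosched stands in for procyield/osyield.
        for i := 0; i < 4; i++ {
            if atomic.LoadUint32(&m.key) == mutexUnlocked &&
                atomic.CompareAndSwapUint32(&m.key, mutexUnlocked, wait) {
                return
            }
            runtime.Gosched()
        }
        // Publish mutexSleeping, then sleep. If the swap found the lock
        // free we own it; the state stays mutexSleeping, which at worst
        // costs one spurious wakeup at unlock.
        if atomic.SwapUint32(&m.key, mutexSleeping) == mutexUnlocked {
            return
        }
        wait = mutexSleeping
        <-m.wake // futexsleep stand-in; the buffer absorbs early wakeups
    }
}

func (m *futexMutex) unlock() {
    switch atomic.SwapUint32(&m.key, mutexUnlocked) {
    case mutexUnlocked:
        panic("unlock of unlocked lock")
    case mutexSleeping:
        // futexwakeup stand-in: wake at most one waiter, never block.
        select {
        case m.wake <- struct{}{}:
        default:
        }
    }
}

func main() {
    m := newFutexMutex()
    var wg sync.WaitGroup
    n := 0
    for g := 0; g < 8; g++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for i := 0; i < 1000; i++ {
                m.lock()
                n++
                m.unlock()
            }
        }()
    }
    wg.Wait()
    fmt.Println(n) // 8000 when mutual exclusion holds
}
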
 func unlock2(l *mutex) {
-    for {
-        v := atomic.Loaduintptr(&l.key)
-        if v == mutex_locked {
-            if atomic.Casuintptr(&l.key, mutex_locked, 0) {
-                break
-            }
-        } else if v&mutex_locked == 0 {
-            throw("unlock of unlocked lock")
-        } else {
-            // Other M's are waiting for the lock.
-            if atomic.Casuintptr(&l.key, v, v&^mutex_locked) {
-                futexwakeup(key32(&l.key), 1)
-                break
-            }
-        }
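+    // A single Xchg releases the lock; the old value tells us whether any
+    // thread may be sleeping and needs a wakeup.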
+    v := atomic.Xchg(key32(&l.key), mutex_unlocked)
+    if v == mutex_unlocked {
+        throw("unlock of unlocked lock")
+    }
+    if v == mutex_sleeping {
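+        // Wake at most one sleeper; it re-enters the lock loop and, if it
+        // must sleep again, restores mutex_sleeping itself.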
+        futexwakeup(key32(&l.key), 1)
     }
     gp := getg()