// Acquire the lock. // Loops (spins) until the lock is acquired. void acquire(struct spinlock *lk) { push_off(); // disable interrupts to avoid deadlock. if(holding(lk)) panic("acquire");
// On RISC-V, sync_lock_test_and_set turns into an atomic swap: // a5 = 1 // s1 = &lk->locked // amoswap.w.aq a5, a5, (s1) while(__sync_lock_test_and_set(&lk->locked, 1) != 0) ;
// Tell the C compiler and the processor to not move loads or stores // past this point, to ensure that the critical section's memory // references happen strictly after the lock is acquired. // On RISC-V, this emits a fence instruction. __sync_synchronize();
// Record info about lock acquisition for holding() and debugging. lk->cpu = mycpu(); }
// NOTE(review): this excerpt starts mid-function — the opening of the
// release() routine is not shown here.

// Tell the C compiler and the CPU to not move loads or stores
// past this point, to ensure that all the stores in the critical
// section are visible to other CPUs before the lock is released,
// and that loads in the critical section occur strictly before
// the lock is released.
// On RISC-V, this emits a fence instruction.
__sync_synchronize();

// Release the lock, equivalent to lk->locked = 0.
// This code doesn't use a C assignment, since the C standard
// implies that an assignment might be implemented with
// multiple store instructions.
// On RISC-V, sync_lock_release turns into an atomic swap:
//   s1 = &lk->locked
//   amoswap.w zero, zero, (s1)
__sync_lock_release(&lk->locked);

// Re-enable interrupts; pairs with the push_off() in acquire().
pop_off();
}
可以看出自旋锁是基于指令集提供的原子交换(atomic swap,即 test-and-set,RISC-V 上为 amoswap)指令实现的:如果存在锁竞争,没有获取到锁的一侧会一直自旋等待,直到锁被释放。
// Atomically release lock and sleep on chan. // Reacquires lock when awakened. void sleep(void *chan, struct spinlock *lk) { structproc *p = myproc(); // Must acquire p->lock in order to // change p->state and then call sched. // Once we hold p->lock, we can be // guaranteed that we won't miss any wakeup // (wakeup locks p->lock), // so it's okay to release lk.
acquire(&p->lock); //DOC: sleeplock1 release(lk);
// Go to sleep. p->chan = chan; p->state = SLEEPING;
sched();
// Tidy up. p->chan = 0;
// Reacquire original lock. release(&p->lock); acquire(lk); }
// Wake up all processes sleeping on chan. // Must be called without any p->lock. void wakeup(void *chan) { structproc *p;
// Lock a mutex, blocking until it is available.
// Fast path: a NORMAL mutex that is currently unlocked is taken with
// a single atomic compare-and-swap; everything else is delegated to
// the timed variant with a null (infinite) timeout.
int __pthread_mutex_lock(pthread_mutex_t *m)
{
	// Low 4 bits of _m_type select the mutex kind.
	if ((m->_m_type&15) == PTHREAD_MUTEX_NORMAL
	    && !a_cas(&m->_m_lock, 0, EBUSY))
		return 0; // fixed: was "return0;" (glued tokens)

	return __pthread_mutex_timedlock(m, 0);
}
int __pthread_mutex_timedlock(pthread_mutex_t *restrict m, conststruct timespec *restrict at) { if ((m->_m_type&15) == PTHREAD_MUTEX_NORMAL && !a_cas(&m->_m_lock, 0, EBUSY)) return0;
int type = m->_m_type; int r, t, priv = (type & 128) ^ 128;
r = __pthread_mutex_trylock(m); if (r != EBUSY) return r;
if (type&8) return pthread_mutex_timedlock_pi(m, at); int spins = 100; while (spins-- && m->_m_lock && !m->_m_waiters) a_spin();
while ((r=__pthread_mutex_trylock(m)) == EBUSY) { r = m->_m_lock; int own = r & 0x3fffffff; if (!own && (!r || (type&4))) continue; if ((type&3) == PTHREAD_MUTEX_ERRORCHECK && own == __pthread_self()->tid) return EDEADLK;
a_inc(&m->_m_waiters); t = r | 0x80000000; a_cas(&m->_m_lock, r, t); r = __timedwait(&m->_m_lock, t, CLOCK_REALTIME, at, priv); a_dec(&m->_m_waiters); if (r && r != EINTR) break; } return r; }