// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <trace/events/lock.h>

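/*
 * For reference, the lock word (lock->cnts) packs a reader count on top
 * of writer state bits. A sketch of the constants used below, modelled
 * on include/asm-generic/qrwlock.h and kept under #if 0 since the real
 * definitions come from that header:
 */
#if 0
#define _QW_WAITING	0x100		/* A writer is waiting	   */
#define _QW_LOCKED	0x0ff		/* A writer holds the lock */
#define _QW_WMASK	0x1ff		/* Writer mask		   */
#define _QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)
#endif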

/**
 * queued_read_lock_slowpath - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
	atomic_sub(_QR_BIAS, &lock->cnts);

	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);

	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
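
/*
 * For illustration, a sketch of the read-lock fast path that falls back
 * to the slowpath above, modelled on queued_read_lock() in
 * include/asm-generic/qrwlock.h (kept under #if 0 because the real
 * inline lives in that header). The fast path speculatively adds
 * _QR_BIAS first, which is why the slowpath starts by subtracting it
 * before queueing.
 */
#if 0
static inline void queued_read_lock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}
#endif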

/**
 * queued_write_lock_slowpath - acquire write lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	int cnts;

	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_WRITE);

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!(cnts = atomic_read(&lock->cnts)) &&
	    atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_or(_QW_WAITING, &lock->cnts);

	/* When no more readers or writers, set the locked flag */
	do {
		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
	arch_spin_unlock(&lock->wait_lock);

	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
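
/*
 * Similarly, a sketch of the write-lock fast path, modelled on
 * queued_write_lock() in include/asm-generic/qrwlock.h (under #if 0
 * because the real inline lives in that header). It only succeeds when
 * the lock word is completely free, i.e. no readers and no writer.
 */
#if 0
static inline void queued_write_lock(struct qrwlock *lock)
{
	int cnts = 0;

	/* Acquire the lock directly when no reader or writer is present. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	queued_write_lock_slowpath(lock);
}
#endif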