0001 // SPDX-License-Identifier: GPL-2.0
0002 /* kernel/locking/rwsem.c: R/W semaphores, public implementation
0003  *
0004  * Written by David Howells (dhowells@redhat.com).
0005  * Derived from asm-i386/semaphore.h
0006  *
0007  * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
0008  * and Michel Lespinasse <walken@google.com>
0009  *
0010  * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
0011  * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
0012  *
0013  * Rwsem count bit fields re-definition and rwsem rearchitecture by
0014  * Waiman Long <longman@redhat.com> and
0015  * Peter Zijlstra <peterz@infradead.org>.
0016  */
0017 
0018 #include <linux/types.h>
0019 #include <linux/kernel.h>
0020 #include <linux/sched.h>
0021 #include <linux/sched/rt.h>
0022 #include <linux/sched/task.h>
0023 #include <linux/sched/debug.h>
0024 #include <linux/sched/wake_q.h>
0025 #include <linux/sched/signal.h>
0026 #include <linux/sched/clock.h>
0027 #include <linux/export.h>
0028 #include <linux/rwsem.h>
0029 #include <linux/atomic.h>
0030 #include <trace/events/lock.h>
0031 
0032 #ifndef CONFIG_PREEMPT_RT
0033 #include "lock_events.h"
0034 
0035 /*
0036  * The least significant 2 bits of the owner value have the following
0037  * meanings when set.
0038  *  - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
0039  *  - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
0040  *
0041  * When the rwsem is reader-owned and a spinning writer has timed out,
0042  * the nonspinnable bit will be set to disable optimistic spinning.
0043  *
0044  * When a writer acquires a rwsem, it puts its task_struct pointer
0045  * into the owner field. It is cleared after an unlock.
0046  *
0047  * When a reader acquires a rwsem, it will also put its task_struct
0048  * pointer into the owner field with the RWSEM_READER_OWNED bit set.
0049  * On unlock, the owner field will largely be left untouched. So
0050  * for a free or reader-owned rwsem, the owner value may contain
0051  * information about the last reader that acquired the rwsem.
0052  *
0053  * That information may be helpful in debugging cases where the system
0054  * seems to hang on a reader-owned rwsem especially if only one reader
0055  * is involved. Ideally we would like to track all the readers that own
0056  * a rwsem, but the overhead is simply too big.
0057  *
0058  * Fast-path reader optimistic lock stealing is supported when the rwsem
0059  * was previously owned by a writer and the following conditions are met:
0060  *  - rwsem is not currently writer owned
0061  *  - the handoff isn't set.
0062  */
0063 #define RWSEM_READER_OWNED  (1UL << 0)
0064 #define RWSEM_NONSPINNABLE  (1UL << 1)
0065 #define RWSEM_OWNER_FLAGS_MASK  (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
0066 
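/*
 * Worked example (illustrative only, assumes task_struct pointers are at
 * least 4-byte aligned so the low two bits are free for the flags above):
 * if a reader whose task_struct sits at 0xffff888012345678 takes the lock,
 * the owner field becomes 0xffff888012345679, i.e. the pointer ORed with
 * RWSEM_READER_OWNED. Masking with ~RWSEM_OWNER_FLAGS_MASK recovers the
 * task_struct pointer; masking with RWSEM_OWNER_FLAGS_MASK recovers the
 * flag bits.
 */
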
0067 #ifdef CONFIG_DEBUG_RWSEMS
0068 # define DEBUG_RWSEMS_WARN_ON(c, sem)   do {            \
0069     if (!debug_locks_silent &&              \
0070         WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
0071         #c, atomic_long_read(&(sem)->count),        \
0072         (unsigned long) sem->magic,         \
0073         atomic_long_read(&(sem)->owner), (long)current, \
0074         list_empty(&(sem)->wait_list) ? "" : "not "))   \
0075             debug_locks_off();          \
0076     } while (0)
0077 #else
0078 # define DEBUG_RWSEMS_WARN_ON(c, sem)
0079 #endif
0080 
0081 /*
0082  * On 64-bit architectures, the bit definitions of the count are:
0083  *
0084  * Bit  0    - writer locked bit
0085  * Bit  1    - waiters present bit
0086  * Bit  2    - lock handoff bit
0087  * Bits 3-7  - reserved
0088  * Bits 8-62 - 55-bit reader count
0089  * Bit  63   - read fail bit
0090  *
0091  * On 32-bit architectures, the bit definitions of the count are:
0092  *
0093  * Bit  0    - writer locked bit
0094  * Bit  1    - waiters present bit
0095  * Bit  2    - lock handoff bit
0096  * Bits 3-7  - reserved
0097  * Bits 8-30 - 23-bit reader count
0098  * Bit  31   - read fail bit
0099  *
0100  * It is not likely that the most significant bit (read fail bit) will ever
0101  * be set. This guard bit is still checked anyway in the down_read() fastpath
0102  * just in case we need to use up more of the reader bits for other purposes
0103  * in the future.
0104  *
0105  * atomic_long_fetch_add() is used to obtain reader lock, whereas
0106  * atomic_long_cmpxchg() will be used to obtain writer lock.
0107  *
0108  * There are three places where the lock handoff bit may be set or cleared.
0109  * 1) rwsem_mark_wake() for readers     -- set, clear
0110  * 2) rwsem_try_write_lock() for writers    -- set, clear
0111  * 3) rwsem_del_waiter()            -- clear
0112  *
0113  * For all the above cases, wait_lock will be held. A writer must also
0114  * be the first one in the wait_list to be eligible for setting the handoff
0115  * bit. So concurrent setting/clearing of handoff bit is not possible.
0116  */
0117 #define RWSEM_WRITER_LOCKED (1UL << 0)
0118 #define RWSEM_FLAG_WAITERS  (1UL << 1)
0119 #define RWSEM_FLAG_HANDOFF  (1UL << 2)
0120 #define RWSEM_FLAG_READFAIL (1UL << (BITS_PER_LONG - 1))
0121 
0122 #define RWSEM_READER_SHIFT  8
0123 #define RWSEM_READER_BIAS   (1UL << RWSEM_READER_SHIFT)
0124 #define RWSEM_READER_MASK   (~(RWSEM_READER_BIAS - 1))
0125 #define RWSEM_WRITER_MASK   RWSEM_WRITER_LOCKED
0126 #define RWSEM_LOCK_MASK     (RWSEM_WRITER_MASK|RWSEM_READER_MASK)
0127 #define RWSEM_READ_FAILED_MASK  (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
0128                  RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
0129 
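/*
 * Worked example (illustrative only): with RWSEM_READER_SHIFT == 8, a count
 * of 0x102 decomposes as (1 << RWSEM_READER_SHIFT) | RWSEM_FLAG_WAITERS,
 * i.e. one active reader with waiters queued, and the writer-locked and
 * handoff bits clear. A count of 0x1 is simply RWSEM_WRITER_LOCKED: one
 * writer, no readers, no waiters.
 */
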
0130 /*
0131  * All writes to owner are protected by WRITE_ONCE() to make sure that
0132  * store tearing can't happen as optimistic spinners may read and use
0133  * the owner value concurrently without lock. Read from owner, however,
0134  * may not need READ_ONCE() as long as the pointer value is only used
0135  * for comparison and isn't being dereferenced.
0136  */
0137 static inline void rwsem_set_owner(struct rw_semaphore *sem)
0138 {
0139     atomic_long_set(&sem->owner, (long)current);
0140 }
0141 
0142 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
0143 {
0144     atomic_long_set(&sem->owner, 0);
0145 }
0146 
0147 /*
0148  * Test the flags in the owner field.
0149  */
0150 static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
0151 {
0152     return atomic_long_read(&sem->owner) & flags;
0153 }
0154 
0155 /*
0156  * The task_struct pointer of the last owning reader will be left in
0157  * the owner field.
0158  *
0159  * Note that the owner value just indicates that the task has owned the
0160  * rwsem previously; it may not be the real owner or one of the real owners
0161  * anymore when that field is examined, so take it with a grain of salt.
0162  *
0163  * The reader non-spinnable bit is preserved.
0164  */
0165 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
0166                         struct task_struct *owner)
0167 {
0168     unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
0169         (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);
0170 
0171     atomic_long_set(&sem->owner, val);
0172 }
0173 
0174 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
0175 {
0176     __rwsem_set_reader_owned(sem, current);
0177 }
0178 
0179 /*
0180  * Return true if the rwsem is owned by a reader.
0181  */
0182 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
0183 {
0184 #ifdef CONFIG_DEBUG_RWSEMS
0185     /*
0186      * Check the count to see if it is write-locked.
0187      */
0188     long count = atomic_long_read(&sem->count);
0189 
0190     if (count & RWSEM_WRITER_MASK)
0191         return false;
0192 #endif
0193     return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
0194 }
0195 
0196 #ifdef CONFIG_DEBUG_RWSEMS
0197 /*
0198  * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
0199  * is a task pointer in the owner field of a reader-owned rwsem, it will be the
0200  * real owner or one of the real owners. The only exception is when the
0201  * unlock is done by up_read_non_owner().
0202  */
0203 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
0204 {
0205     unsigned long val = atomic_long_read(&sem->owner);
0206 
0207     while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
0208         if (atomic_long_try_cmpxchg(&sem->owner, &val,
0209                         val & RWSEM_OWNER_FLAGS_MASK))
0210             return;
0211     }
0212 }
0213 #else
0214 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
0215 {
0216 }
0217 #endif
0218 
0219 /*
0220  * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag
0221  * remains set. Otherwise, the operation will be aborted.
0222  */
0223 static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
0224 {
0225     unsigned long owner = atomic_long_read(&sem->owner);
0226 
0227     do {
0228         if (!(owner & RWSEM_READER_OWNED))
0229             break;
0230         if (owner & RWSEM_NONSPINNABLE)
0231             break;
0232     } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
0233                       owner | RWSEM_NONSPINNABLE));
0234 }
0235 
0236 static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
0237 {
0238     *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
0239 
0240     if (WARN_ON_ONCE(*cntp < 0))
0241         rwsem_set_nonspinnable(sem);
0242 
0243     if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
0244         rwsem_set_reader_owned(sem);
0245         return true;
0246     }
0247 
0248     return false;
0249 }
0250 
0251 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
0252 {
0253     long tmp = RWSEM_UNLOCKED_VALUE;
0254 
0255     if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
0256         rwsem_set_owner(sem);
0257         return true;
0258     }
0259 
0260     return false;
0261 }
0262 
0263 /*
0264  * Return just the real task structure pointer of the owner
0265  */
0266 static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
0267 {
0268     return (struct task_struct *)
0269         (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
0270 }
0271 
0272 /*
0273  * Return the real task structure pointer of the owner and the embedded
0274  * flags in the owner. pflags must be non-NULL.
0275  */
0276 static inline struct task_struct *
0277 rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
0278 {
0279     unsigned long owner = atomic_long_read(&sem->owner);
0280 
0281     *pflags = owner & RWSEM_OWNER_FLAGS_MASK;
0282     return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
0283 }
0284 
0285 /*
0286  * Guide to the rw_semaphore's count field.
0287  *
0288  * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
0289  * by a writer.
0290  *
0291  * The lock is owned by readers when
0292  * (1) the RWSEM_WRITER_LOCKED isn't set in count,
0293  * (2) some of the reader bits are set in count, and
0294  * (3) the owner field has the RWSEM_READER_OWNED bit set.
0295  *
0296  * Having some reader bits set is not enough to guarantee a reader-owned
0297  * lock as the readers may be in the process of backing out from the count
0298  * and a writer has just released the lock. So another writer may steal
0299  * the lock immediately after that.
0300  */
0301 
0302 /*
0303  * Initialize an rwsem:
0304  */
0305 void __init_rwsem(struct rw_semaphore *sem, const char *name,
0306           struct lock_class_key *key)
0307 {
0308 #ifdef CONFIG_DEBUG_LOCK_ALLOC
0309     /*
0310      * Make sure we are not reinitializing a held semaphore:
0311      */
0312     debug_check_no_locks_freed((void *)sem, sizeof(*sem));
0313     lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
0314 #endif
0315 #ifdef CONFIG_DEBUG_RWSEMS
0316     sem->magic = sem;
0317 #endif
0318     atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
0319     raw_spin_lock_init(&sem->wait_lock);
0320     INIT_LIST_HEAD(&sem->wait_list);
0321     atomic_long_set(&sem->owner, 0L);
0322 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
0323     osq_lock_init(&sem->osq);
0324 #endif
0325 }
0326 EXPORT_SYMBOL(__init_rwsem);
0327 
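/*
 * Usage sketch (hypothetical names, for illustration only): an rwsem is
 * either declared statically with DECLARE_RWSEM() or embedded in a larger
 * object and initialized at run time with init_rwsem(), which supplies the
 * lockdep class key and ends up in __init_rwsem() above.
 */
static DECLARE_RWSEM(rwsem_example_static_lock);   /* hypothetical */

struct rwsem_example {                              /* hypothetical container */
    struct rw_semaphore lock;
    int value;
};

static void rwsem_example_init(struct rwsem_example *e)
{
    init_rwsem(&e->lock);   /* run-time init with its own lockdep class key */
    e->value = 0;
}
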
0328 enum rwsem_waiter_type {
0329     RWSEM_WAITING_FOR_WRITE,
0330     RWSEM_WAITING_FOR_READ
0331 };
0332 
0333 struct rwsem_waiter {
0334     struct list_head list;
0335     struct task_struct *task;
0336     enum rwsem_waiter_type type;
0337     unsigned long timeout;
0338     bool handoff_set;
0339 };
0340 #define rwsem_first_waiter(sem) \
0341     list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
0342 
0343 enum rwsem_wake_type {
0344     RWSEM_WAKE_ANY,     /* Wake whatever's at head of wait list */
0345     RWSEM_WAKE_READERS, /* Wake readers only */
0346     RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
0347 };
0348 
0349 /*
0350  * The typical HZ value is either 250 or 1000. So set the minimum waiting
0351  * time in the wait queue to 4ms or 1 jiffy (whichever is longer) before
0352  * initiating the handoff protocol.
0353  */
0354 #define RWSEM_WAIT_TIMEOUT  DIV_ROUND_UP(HZ, 250)
0355 
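/*
 * Worked example (illustrative only): DIV_ROUND_UP(HZ, 250) is 1 jiffy
 * (4ms) at HZ == 250, 4 jiffies (also 4ms) at HZ == 1000, and 1 jiffy
 * (10ms) at HZ == 100, so the effective wait before requesting handoff
 * is always at least 4ms.
 */
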
0356 /*
0357  * Magic number to batch-wakeup waiting readers, even when writers are
0358  * also present in the queue. This both limits the amount of work the
0359  * waking thread must do and also prevents any potential counter overflow,
0360  * however unlikely.
0361  */
0362 #define MAX_READERS_WAKEUP  0x100
0363 
0364 static inline void
0365 rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
0366 {
0367     lockdep_assert_held(&sem->wait_lock);
0368     list_add_tail(&waiter->list, &sem->wait_list);
0369     /* caller will set RWSEM_FLAG_WAITERS */
0370 }
0371 
0372 /*
0373  * Remove a waiter from the wait_list and clear flags.
0374  *
0375  * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
0376  * this function. Modify with care.
0377  *
0378  * Return: true if wait_list isn't empty and false otherwise
0379  */
0380 static inline bool
0381 rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
0382 {
0383     lockdep_assert_held(&sem->wait_lock);
0384     list_del(&waiter->list);
0385     if (likely(!list_empty(&sem->wait_list)))
0386         return true;
0387 
0388     atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
0389     return false;
0390 }
0391 
0392 /*
0393  * handle the lock release when processes blocked on it can now run
0394  * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
0395  *   have been set.
0396  * - there must be someone on the queue
0397  * - the wait_lock must be held by the caller
0398  * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
0399  *   to actually wake up the blocked task(s) and drop the reference count,
0400  *   preferably when the wait_lock is released
0401  * - woken process blocks are discarded from the list after having task zeroed
0402  * - writers are only marked woken if downgrading is false
0403  *
0404  * Implies rwsem_del_waiter() for all woken readers.
0405  */
0406 static void rwsem_mark_wake(struct rw_semaphore *sem,
0407                 enum rwsem_wake_type wake_type,
0408                 struct wake_q_head *wake_q)
0409 {
0410     struct rwsem_waiter *waiter, *tmp;
0411     long oldcount, woken = 0, adjustment = 0;
0412     struct list_head wlist;
0413 
0414     lockdep_assert_held(&sem->wait_lock);
0415 
0416     /*
0417      * Take a peek at the queue head waiter such that we can determine
0418      * the wakeup(s) to perform.
0419      */
0420     waiter = rwsem_first_waiter(sem);
0421 
0422     if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
0423         if (wake_type == RWSEM_WAKE_ANY) {
0424             /*
0425              * Mark writer at the front of the queue for wakeup.
0426              * Until the task is actually awoken later by
0427              * the caller, other writers are able to steal it.
0428              * Readers, on the other hand, will block as they
0429              * will notice the queued writer.
0430              */
0431             wake_q_add(wake_q, waiter->task);
0432             lockevent_inc(rwsem_wake_writer);
0433         }
0434 
0435         return;
0436     }
0437 
0438     /*
0439      * No reader wakeup if there are too many of them already.
0440      */
0441     if (unlikely(atomic_long_read(&sem->count) < 0))
0442         return;
0443 
0444     /*
0445      * Writers might steal the lock before we grant it to the next reader.
0446      * We prefer to do the first reader grant before counting readers
0447      * so we can bail out early if a writer stole the lock.
0448      */
0449     if (wake_type != RWSEM_WAKE_READ_OWNED) {
0450         struct task_struct *owner;
0451 
0452         adjustment = RWSEM_READER_BIAS;
0453         oldcount = atomic_long_fetch_add(adjustment, &sem->count);
0454         if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
0455             /*
0456              * When we've been waiting "too" long (for writers
0457              * to give up the lock), request a HANDOFF to
0458              * force the issue.
0459              */
0460             if (time_after(jiffies, waiter->timeout)) {
0461                 if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
0462                     adjustment -= RWSEM_FLAG_HANDOFF;
0463                     lockevent_inc(rwsem_rlock_handoff);
0464                 }
0465                 waiter->handoff_set = true;
0466             }
0467 
0468             atomic_long_add(-adjustment, &sem->count);
0469             return;
0470         }
0471         /*
0472          * Set it to reader-owned to give spinners an early
0473          * indication that readers now have the lock.
0474          * The reader nonspinnable bit seen at slowpath entry of
0475          * the reader is copied over.
0476          */
0477         owner = waiter->task;
0478         __rwsem_set_reader_owned(sem, owner);
0479     }
0480 
0481     /*
0482      * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
0483      * queue. We know that woken will be at least 1, as we accounted
0484      * for above. Note we increment the 'active part' of the count by the
0485      * number of readers before waking any processes up.
0486      *
0487      * This is an adaptation of the phase-fair R/W locks where at the
0488      * reader phase (first waiter is a reader), all readers are eligible
0489      * to acquire the lock at the same time irrespective of their order
0490      * in the queue. The writers acquire the lock according to their
0491      * order in the queue.
0492      *
0493      * We have to do wakeup in 2 passes to prevent the possibility that
0494      * the reader count may be decremented before it is incremented. It
0495      * is because the to-be-woken waiter may not have slept yet. So it
0496      * may see waiter->task got cleared, finish its critical section and
0497      * may see waiter->task cleared, finish its critical section and
0498      *
0499      * 1) Collect the read-waiters in a separate list, count them and
0500      *    fully increment the reader count in rwsem.
0501      * 2) For each waiter in the new list, clear waiter->task and
0502      *    put them into wake_q to be woken up later.
0503      */
0504     INIT_LIST_HEAD(&wlist);
0505     list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
0506         if (waiter->type == RWSEM_WAITING_FOR_WRITE)
0507             continue;
0508 
0509         woken++;
0510         list_move_tail(&waiter->list, &wlist);
0511 
0512         /*
0513          * Limit # of readers that can be woken up per wakeup call.
0514          */
0515         if (unlikely(woken >= MAX_READERS_WAKEUP))
0516             break;
0517     }
0518 
0519     adjustment = woken * RWSEM_READER_BIAS - adjustment;
0520     lockevent_cond_inc(rwsem_wake_reader, woken);
0521 
0522     oldcount = atomic_long_read(&sem->count);
0523     if (list_empty(&sem->wait_list)) {
0524         /*
0525          * Combined with list_move_tail() above, this implies
0526          * rwsem_del_waiter().
0527          */
0528         adjustment -= RWSEM_FLAG_WAITERS;
0529         if (oldcount & RWSEM_FLAG_HANDOFF)
0530             adjustment -= RWSEM_FLAG_HANDOFF;
0531     } else if (woken) {
0532         /*
0533          * When we've woken a reader, we no longer need to force
0534          * writers to give up the lock and we can clear HANDOFF.
0535          */
0536         if (oldcount & RWSEM_FLAG_HANDOFF)
0537             adjustment -= RWSEM_FLAG_HANDOFF;
0538     }
0539 
0540     if (adjustment)
0541         atomic_long_add(adjustment, &sem->count);
0542 
0543     /* 2nd pass */
0544     list_for_each_entry_safe(waiter, tmp, &wlist, list) {
0545         struct task_struct *tsk;
0546 
0547         tsk = waiter->task;
0548         get_task_struct(tsk);
0549 
0550         /*
0551          * Ensure calling get_task_struct() before setting the reader
0552          * waiter to nil such that rwsem_down_read_slowpath() cannot
0553          * race with do_exit() by always holding a reference count
0554          * to the task to wakeup.
0555          */
0556         smp_store_release(&waiter->task, NULL);
0557         /*
0558          * Ensure issuing the wakeup (either by us or someone else)
0559          * after setting the reader waiter to nil.
0560          */
0561         wake_q_add_safe(wake_q, tsk);
0562     }
0563 }
0564 
0565 /*
0566  * Remove a waiter and try to wake up other waiters in the wait queue.
0567  * This function is called from the out_nolock path of both the reader and
0568  * writer slowpaths with wait_lock held. It releases the wait_lock and
0569  * optionally wakes up waiters before it returns.
0570  */
0571 static inline void
0572 rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
0573               struct wake_q_head *wake_q)
0574               __releases(&sem->wait_lock)
0575 {
0576     bool first = rwsem_first_waiter(sem) == waiter;
0577 
0578     wake_q_init(wake_q);
0579 
0580     /*
0581      * If the wait_list isn't empty and the waiter to be deleted is
0582      * the first waiter, we wake up the remaining waiters as they may
0583      * be eligible to acquire or spin on the lock.
0584      */
0585     if (rwsem_del_waiter(sem, waiter) && first)
0586         rwsem_mark_wake(sem, RWSEM_WAKE_ANY, wake_q);
0587     raw_spin_unlock_irq(&sem->wait_lock);
0588     if (!wake_q_empty(wake_q))
0589         wake_up_q(wake_q);
0590 }
0591 
0592 /*
0593  * This function must be called with the sem->wait_lock held to prevent
0594  * race conditions between checking the rwsem wait list and setting the
0595  * sem->count accordingly.
0596  *
0597  * Implies rwsem_del_waiter() on success.
0598  */
0599 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
0600                     struct rwsem_waiter *waiter)
0601 {
0602     struct rwsem_waiter *first = rwsem_first_waiter(sem);
0603     long count, new;
0604 
0605     lockdep_assert_held(&sem->wait_lock);
0606 
0607     count = atomic_long_read(&sem->count);
0608     do {
0609         bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
0610 
0611         if (has_handoff) {
0612             /*
0613              * Honor handoff bit and yield only when the first
0614              * waiter is the one that set it. Otherwise, we
0615              * still try to acquire the rwsem.
0616              */
0617             if (first->handoff_set && (waiter != first))
0618                 return false;
0619 
0620             /*
0621              * First waiter can inherit a previously set handoff
0622              * bit and spin on rwsem if lock acquisition fails.
0623              */
0624             if (waiter == first)
0625                 waiter->handoff_set = true;
0626         }
0627 
0628         new = count;
0629 
0630         if (count & RWSEM_LOCK_MASK) {
0631             if (has_handoff || (!rt_task(waiter->task) &&
0632                         !time_after(jiffies, waiter->timeout)))
0633                 return false;
0634 
0635             new |= RWSEM_FLAG_HANDOFF;
0636         } else {
0637             new |= RWSEM_WRITER_LOCKED;
0638             new &= ~RWSEM_FLAG_HANDOFF;
0639 
0640             if (list_is_singular(&sem->wait_list))
0641                 new &= ~RWSEM_FLAG_WAITERS;
0642         }
0643     } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
0644 
0645     /*
0646      * We have either acquired the lock with handoff bit cleared or
0647      * set the handoff bit.
0648      */
0649     if (new & RWSEM_FLAG_HANDOFF) {
0650         waiter->handoff_set = true;
0651         lockevent_inc(rwsem_wlock_handoff);
0652         return false;
0653     }
0654 
0655     /*
0656      * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
0657      * success.
0658      */
0659     list_del(&waiter->list);
0660     rwsem_set_owner(sem);
0661     return true;
0662 }
0663 
0664 /*
0665  * The rwsem_spin_on_owner() function returns the following 4 values
0666  * depending on the lock owner state.
0667  *   OWNER_NULL  : owner is currently NULL
0668  *   OWNER_WRITER: when owner changes and is a writer
0669  *   OWNER_READER: when owner changes and the new owner may be a reader.
0670  *   OWNER_NONSPINNABLE:
0671  *         when optimistic spinning has to stop because either the
0672  *         owner stops running, is unknown, or its timeslice has
0673  *         been used up.
0674  */
0675 enum owner_state {
0676     OWNER_NULL      = 1 << 0,
0677     OWNER_WRITER        = 1 << 1,
0678     OWNER_READER        = 1 << 2,
0679     OWNER_NONSPINNABLE  = 1 << 3,
0680 };
0681 
0682 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
0683 /*
0684  * Try to acquire write lock before the writer has been put on the wait queue.
0685  */
0686 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
0687 {
0688     long count = atomic_long_read(&sem->count);
0689 
0690     while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
0691         if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
0692                     count | RWSEM_WRITER_LOCKED)) {
0693             rwsem_set_owner(sem);
0694             lockevent_inc(rwsem_opt_lock);
0695             return true;
0696         }
0697     }
0698     return false;
0699 }
0700 
0701 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
0702 {
0703     struct task_struct *owner;
0704     unsigned long flags;
0705     bool ret = true;
0706 
0707     if (need_resched()) {
0708         lockevent_inc(rwsem_opt_fail);
0709         return false;
0710     }
0711 
0712     preempt_disable();
0713     /*
0714      * Disabling preemption is equivalent to an RCU read-side critical
0715      * section, so the task_struct structure won't go away.
0716      */
0717     owner = rwsem_owner_flags(sem, &flags);
0718     /*
0719      * Don't check the read-owner as the entry may be stale.
0720      */
0721     if ((flags & RWSEM_NONSPINNABLE) ||
0722         (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
0723         ret = false;
0724     preempt_enable();
0725 
0726     lockevent_cond_inc(rwsem_opt_fail, !ret);
0727     return ret;
0728 }
0729 
0730 #define OWNER_SPINNABLE     (OWNER_NULL | OWNER_WRITER | OWNER_READER)
0731 
0732 static inline enum owner_state
0733 rwsem_owner_state(struct task_struct *owner, unsigned long flags)
0734 {
0735     if (flags & RWSEM_NONSPINNABLE)
0736         return OWNER_NONSPINNABLE;
0737 
0738     if (flags & RWSEM_READER_OWNED)
0739         return OWNER_READER;
0740 
0741     return owner ? OWNER_WRITER : OWNER_NULL;
0742 }
0743 
0744 static noinline enum owner_state
0745 rwsem_spin_on_owner(struct rw_semaphore *sem)
0746 {
0747     struct task_struct *new, *owner;
0748     unsigned long flags, new_flags;
0749     enum owner_state state;
0750 
0751     lockdep_assert_preemption_disabled();
0752 
0753     owner = rwsem_owner_flags(sem, &flags);
0754     state = rwsem_owner_state(owner, flags);
0755     if (state != OWNER_WRITER)
0756         return state;
0757 
0758     for (;;) {
0759         /*
0760          * When a waiting writer sets the handoff flag, it may spin
0761          * on the owner as well. Once that writer acquires the lock,
0762          * we can spin on it. So we don't need to quit even when the
0763          * handoff bit is set.
0764          */
0765         new = rwsem_owner_flags(sem, &new_flags);
0766         if ((new != owner) || (new_flags != flags)) {
0767             state = rwsem_owner_state(new, new_flags);
0768             break;
0769         }
0770 
0771         /*
0772          * Ensure we emit the owner->on_cpu dereference _after_
0773          * checking that sem->owner still matches owner. If that fails,
0774          * owner might point to free()d memory. If it still matches,
0775          * our spinning context has already disabled preemption, which
0776          * is equivalent to an RCU read-side critical section and
0777          * ensures the memory stays valid.
0778          */
0779         barrier();
0780 
0781         if (need_resched() || !owner_on_cpu(owner)) {
0782             state = OWNER_NONSPINNABLE;
0783             break;
0784         }
0785 
0786         cpu_relax();
0787     }
0788 
0789     return state;
0790 }
0791 
0792 /*
0793  * Calculate reader-owned rwsem spinning threshold for writer
0794  *
0795  * The more readers own the rwsem, the longer it will take for them to
0796  * wind down and free the rwsem. So the empirical formula used to
0797  * determine the actual spinning time limit here is:
0798  *
0799  *   Spinning threshold = (10 + nr_readers/2)us
0800  *
0801  * The limit is capped to a maximum of 25us (30 readers). This is just
0802  * a heuristic and is subject to change in the future.
0803  */
0804 static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
0805 {
0806     long count = atomic_long_read(&sem->count);
0807     int readers = count >> RWSEM_READER_SHIFT;
0808     u64 delta;
0809 
0810     if (readers > 30)
0811         readers = 30;
0812     delta = (20 + readers) * NSEC_PER_USEC / 2;
0813 
0814     return sched_clock() + delta;
0815 }
0816 
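/*
 * Worked example (illustrative only): with 10 readers holding the lock,
 * delta = (20 + 10) * NSEC_PER_USEC / 2 = 15us, matching the
 * (10 + nr_readers/2)us formula above. With 30 or more readers the cap
 * applies and the threshold is (20 + 30) / 2 = 25us.
 */
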
0817 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
0818 {
0819     bool taken = false;
0820     int prev_owner_state = OWNER_NULL;
0821     int loop = 0;
0822     u64 rspin_threshold = 0;
0823 
0824     preempt_disable();
0825 
0826     /* sem->wait_lock should not be held when doing optimistic spinning */
0827     if (!osq_lock(&sem->osq))
0828         goto done;
0829 
0830     /*
0831      * Optimistically spin on the owner field and attempt to acquire the
0832      * lock whenever the owner changes. Spinning will be stopped when:
0833      *  1) the owning writer isn't running; or
0834      *  2) readers own the lock and spinning time has exceeded limit.
0835      */
0836     for (;;) {
0837         enum owner_state owner_state;
0838 
0839         owner_state = rwsem_spin_on_owner(sem);
0840         if (!(owner_state & OWNER_SPINNABLE))
0841             break;
0842 
0843         /*
0844          * Try to acquire the lock
0845          */
0846         taken = rwsem_try_write_lock_unqueued(sem);
0847 
0848         if (taken)
0849             break;
0850 
0851         /*
0852          * Time-based reader-owned rwsem optimistic spinning
0853          */
0854         if (owner_state == OWNER_READER) {
0855             /*
0856              * Re-initialize rspin_threshold every time
0857              * the owner state changes from non-reader to reader.
0858              * This allows a writer to steal the lock in between
0859              * 2 reader phases and have the threshold reset at
0860              * the beginning of the 2nd reader phase.
0861              */
0862             if (prev_owner_state != OWNER_READER) {
0863                 if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
0864                     break;
0865                 rspin_threshold = rwsem_rspin_threshold(sem);
0866                 loop = 0;
0867             }
0868 
0869             /*
0870              * Check time threshold once every 16 iterations to
0871              * avoid calling sched_clock() too frequently so
0872              * as to reduce the average latency between the times
0873              * when the lock becomes free and when the spinner
0874              * is ready to do a trylock.
0875              */
0876             else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
0877                 rwsem_set_nonspinnable(sem);
0878                 lockevent_inc(rwsem_opt_nospin);
0879                 break;
0880             }
0881         }
0882 
0883         /*
0884          * An RT task cannot do optimistic spinning if it cannot
0885          * be sure the lock holder is running, or a live-lock may
0886          * happen if the current task and the lock holder happen
0887          * to run on the same CPU. However, aborting optimistic
0888          * spinning whenever a NULL owner is detected may miss some
0889          * opportunities where spinning can continue without causing
0890          * problems.
0891          *
0892          * There are 2 possible cases where an RT task may be able
0893          * to continue spinning.
0894          *
0895          * 1) The lock owner is in the process of releasing the
0896          *    lock, sem->owner is cleared but the lock has not
0897          *    been released yet.
0898          * 2) The lock was free and owner cleared, but another
0899          *    task just comes in and acquires the lock before
0900          *    we try to get it. The new owner may be a spinnable
0901          *    writer.
0902          *
0903          * To take advantage of the two scenarios listed above, the RT
0904          * task is made to retry one more time to see if it can
0905          * acquire the lock or continue spinning on the new owning
0906          * writer. Of course, if the time lag is long enough or the
0907          * new owner is not a writer or spinnable, the RT task will
0908          * quit spinning.
0909          *
0910          * If the owner is a writer, the need_resched() check is
0911          * done inside rwsem_spin_on_owner(). If the owner is not
0912          * a writer, need_resched() check needs to be done here.
0913          */
0914         if (owner_state != OWNER_WRITER) {
0915             if (need_resched())
0916                 break;
0917             if (rt_task(current) &&
0918                (prev_owner_state != OWNER_WRITER))
0919                 break;
0920         }
0921         prev_owner_state = owner_state;
0922 
0923         /*
0924          * The cpu_relax() call is a compiler barrier which forces
0925          * everything in this loop to be re-loaded. We don't need
0926          * memory barriers as we'll eventually observe the right
0927          * values at the cost of a few extra spins.
0928          */
0929         cpu_relax();
0930     }
0931     osq_unlock(&sem->osq);
0932 done:
0933     preempt_enable();
0934     lockevent_cond_inc(rwsem_opt_fail, !taken);
0935     return taken;
0936 }
0937 
0938 /*
0939  * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
0940  * only be called when the reader count reaches 0.
0941  */
0942 static inline void clear_nonspinnable(struct rw_semaphore *sem)
0943 {
0944     if (unlikely(rwsem_test_oflags(sem, RWSEM_NONSPINNABLE)))
0945         atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
0946 }
0947 
0948 #else
0949 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
0950 {
0951     return false;
0952 }
0953 
0954 static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
0955 {
0956     return false;
0957 }
0958 
0959 static inline void clear_nonspinnable(struct rw_semaphore *sem) { }
0960 
0961 static inline enum owner_state
0962 rwsem_spin_on_owner(struct rw_semaphore *sem)
0963 {
0964     return OWNER_NONSPINNABLE;
0965 }
0966 #endif
0967 
0968 /*
0969  * Prepare to wake up waiter(s) in the wait queue by putting them into the
0970  * given wake_q if the rwsem lock owner isn't a writer. If the rwsem is
0971  * likely reader-owned, wake up the read lock waiters at the front of the
0972  * queue; otherwise, wake up whichever waiter is at the front.
0973  *
0974  * This is being called from both reader and writer slow paths.
0975  */
0976 static inline void rwsem_cond_wake_waiter(struct rw_semaphore *sem, long count,
0977                       struct wake_q_head *wake_q)
0978 {
0979     enum rwsem_wake_type wake_type;
0980 
0981     if (count & RWSEM_WRITER_MASK)
0982         return;
0983 
0984     if (count & RWSEM_READER_MASK) {
0985         wake_type = RWSEM_WAKE_READERS;
0986     } else {
0987         wake_type = RWSEM_WAKE_ANY;
0988         clear_nonspinnable(sem);
0989     }
0990     rwsem_mark_wake(sem, wake_type, wake_q);
0991 }
0992 
0993 /*
0994  * Wait for the read lock to be granted
0995  */
0996 static struct rw_semaphore __sched *
0997 rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
0998 {
0999     long adjustment = -RWSEM_READER_BIAS;
1000     long rcnt = (count >> RWSEM_READER_SHIFT);
1001     struct rwsem_waiter waiter;
1002     DEFINE_WAKE_Q(wake_q);
1003 
1004     /*
1005      * To prevent a constant stream of readers from starving a sleeping
1006      * waiter, don't attempt optimistic lock stealing if the lock is
1007      * currently owned by readers.
1008      */
1009     if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
1010         (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
1011         goto queue;
1012 
1013     /*
1014      * Reader optimistic lock stealing.
1015      */
1016     if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
1017         rwsem_set_reader_owned(sem);
1018         lockevent_inc(rwsem_rlock_steal);
1019 
1020         /*
1021          * Wake up other readers in the wait queue if it is
1022          * the first reader.
1023          */
1024         if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
1025             raw_spin_lock_irq(&sem->wait_lock);
1026             if (!list_empty(&sem->wait_list))
1027                 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
1028                         &wake_q);
1029             raw_spin_unlock_irq(&sem->wait_lock);
1030             wake_up_q(&wake_q);
1031         }
1032         return sem;
1033     }
1034 
1035 queue:
1036     waiter.task = current;
1037     waiter.type = RWSEM_WAITING_FOR_READ;
1038     waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1039     waiter.handoff_set = false;
1040 
1041     raw_spin_lock_irq(&sem->wait_lock);
1042     if (list_empty(&sem->wait_list)) {
1043         /*
1044          * In case the wait queue is empty and the lock isn't owned
1045          * by a writer, this reader can exit the slowpath and return
1046          * immediately as its RWSEM_READER_BIAS has already been set
1047          * in the count.
1048          */
1049         if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) {
1050             /* Provide lock ACQUIRE */
1051             smp_acquire__after_ctrl_dep();
1052             raw_spin_unlock_irq(&sem->wait_lock);
1053             rwsem_set_reader_owned(sem);
1054             lockevent_inc(rwsem_rlock_fast);
1055             return sem;
1056         }
1057         adjustment += RWSEM_FLAG_WAITERS;
1058     }
1059     rwsem_add_waiter(sem, &waiter);
1060 
1061     /* we're now waiting on the lock, but no longer actively locking */
1062     count = atomic_long_add_return(adjustment, &sem->count);
1063 
1064     rwsem_cond_wake_waiter(sem, count, &wake_q);
1065     raw_spin_unlock_irq(&sem->wait_lock);
1066 
1067     if (!wake_q_empty(&wake_q))
1068         wake_up_q(&wake_q);
1069 
1070     trace_contention_begin(sem, LCB_F_READ);
1071 
1072     /* wait to be given the lock */
1073     for (;;) {
1074         set_current_state(state);
1075         if (!smp_load_acquire(&waiter.task)) {
1076             /* Matches rwsem_mark_wake()'s smp_store_release(). */
1077             break;
1078         }
1079         if (signal_pending_state(state, current)) {
1080             raw_spin_lock_irq(&sem->wait_lock);
1081             if (waiter.task)
1082                 goto out_nolock;
1083             raw_spin_unlock_irq(&sem->wait_lock);
1084             /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
1085             break;
1086         }
1087         schedule();
1088         lockevent_inc(rwsem_sleep_reader);
1089     }
1090 
1091     __set_current_state(TASK_RUNNING);
1092     lockevent_inc(rwsem_rlock);
1093     trace_contention_end(sem, 0);
1094     return sem;
1095 
1096 out_nolock:
1097     rwsem_del_wake_waiter(sem, &waiter, &wake_q);
1098     __set_current_state(TASK_RUNNING);
1099     lockevent_inc(rwsem_rlock_fail);
1100     trace_contention_end(sem, -EINTR);
1101     return ERR_PTR(-EINTR);
1102 }
1103 
1104 /*
1105  * Wait until we successfully acquire the write lock
1106  */
1107 static struct rw_semaphore __sched *
1108 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1109 {
1110     struct rwsem_waiter waiter;
1111     DEFINE_WAKE_Q(wake_q);
1112 
1113     /* do optimistic spinning and steal lock if possible */
1114     if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
1115         /* rwsem_optimistic_spin() implies ACQUIRE on success */
1116         return sem;
1117     }
1118 
1119     /*
1120      * Optimistic spinning failed, proceed to the slowpath
1121      * and block until we can acquire the sem.
1122      */
1123     waiter.task = current;
1124     waiter.type = RWSEM_WAITING_FOR_WRITE;
1125     waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1126     waiter.handoff_set = false;
1127 
1128     raw_spin_lock_irq(&sem->wait_lock);
1129     rwsem_add_waiter(sem, &waiter);
1130 
1131     /* we're now waiting on the lock */
1132     if (rwsem_first_waiter(sem) != &waiter) {
1133         rwsem_cond_wake_waiter(sem, atomic_long_read(&sem->count),
1134                        &wake_q);
1135         if (!wake_q_empty(&wake_q)) {
1136             /*
1137              * We want to minimize wait_lock hold time especially
1138              * when a large number of readers are to be woken up.
1139              */
1140             raw_spin_unlock_irq(&sem->wait_lock);
1141             wake_up_q(&wake_q);
1142             raw_spin_lock_irq(&sem->wait_lock);
1143         }
1144     } else {
1145         atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1146     }
1147 
1148     /* wait until we successfully acquire the lock */
1149     set_current_state(state);
1150     trace_contention_begin(sem, LCB_F_WRITE);
1151 
1152     for (;;) {
1153         if (rwsem_try_write_lock(sem, &waiter)) {
1154             /* rwsem_try_write_lock() implies ACQUIRE on success */
1155             break;
1156         }
1157 
1158         raw_spin_unlock_irq(&sem->wait_lock);
1159 
1160         if (signal_pending_state(state, current))
1161             goto out_nolock;
1162 
1163         /*
1164          * After setting the handoff bit and failing to acquire
1165          * the lock, attempt to spin on owner to accelerate lock
1166          * transfer. If the previous owner is an on-cpu writer and it
1167          * has just released the lock, OWNER_NULL will be returned.
1168          * In this case, we attempt to acquire the lock again
1169          * without sleeping.
1170          */
1171         if (waiter.handoff_set) {
1172             enum owner_state owner_state;
1173 
1174             preempt_disable();
1175             owner_state = rwsem_spin_on_owner(sem);
1176             preempt_enable();
1177 
1178             if (owner_state == OWNER_NULL)
1179                 goto trylock_again;
1180         }
1181 
1182         schedule();
1183         lockevent_inc(rwsem_sleep_writer);
1184         set_current_state(state);
1185 trylock_again:
1186         raw_spin_lock_irq(&sem->wait_lock);
1187     }
1188     __set_current_state(TASK_RUNNING);
1189     raw_spin_unlock_irq(&sem->wait_lock);
1190     lockevent_inc(rwsem_wlock);
1191     trace_contention_end(sem, 0);
1192     return sem;
1193 
1194 out_nolock:
1195     __set_current_state(TASK_RUNNING);
1196     raw_spin_lock_irq(&sem->wait_lock);
1197     rwsem_del_wake_waiter(sem, &waiter, &wake_q);
1198     lockevent_inc(rwsem_wlock_fail);
1199     trace_contention_end(sem, -EINTR);
1200     return ERR_PTR(-EINTR);
1201 }
1202 
1203 /*
1204  * handle waking up a waiter on the semaphore
1205  * - up_read/up_write has decremented the active part of count if we come here
1206  */
1207 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
1208 {
1209     unsigned long flags;
1210     DEFINE_WAKE_Q(wake_q);
1211 
1212     raw_spin_lock_irqsave(&sem->wait_lock, flags);
1213 
1214     if (!list_empty(&sem->wait_list))
1215         rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1216 
1217     raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1218     wake_up_q(&wake_q);
1219 
1220     return sem;
1221 }
1222 
1223 /*
1224  * downgrade a write lock into a read lock
1225  * - caller incremented waiting part of count and discovered it still negative
1226  * - just wake up any readers at the front of the queue
1227  */
1228 static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1229 {
1230     unsigned long flags;
1231     DEFINE_WAKE_Q(wake_q);
1232 
1233     raw_spin_lock_irqsave(&sem->wait_lock, flags);
1234 
1235     if (!list_empty(&sem->wait_list))
1236         rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
1237 
1238     raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1239     wake_up_q(&wake_q);
1240 
1241     return sem;
1242 }
1243 
1244 /*
1245  * lock for reading
1246  */
1247 static inline int __down_read_common(struct rw_semaphore *sem, int state)
1248 {
1249     long count;
1250 
1251     if (!rwsem_read_trylock(sem, &count)) {
1252         if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
1253             return -EINTR;
1254         DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1255     }
1256     return 0;
1257 }
1258 
1259 static inline void __down_read(struct rw_semaphore *sem)
1260 {
1261     __down_read_common(sem, TASK_UNINTERRUPTIBLE);
1262 }
1263 
1264 static inline int __down_read_interruptible(struct rw_semaphore *sem)
1265 {
1266     return __down_read_common(sem, TASK_INTERRUPTIBLE);
1267 }
1268 
1269 static inline int __down_read_killable(struct rw_semaphore *sem)
1270 {
1271     return __down_read_common(sem, TASK_KILLABLE);
1272 }
1273 
1274 static inline int __down_read_trylock(struct rw_semaphore *sem)
1275 {
1276     long tmp;
1277 
1278     DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1279 
1280     tmp = atomic_long_read(&sem->count);
1281     while (!(tmp & RWSEM_READ_FAILED_MASK)) {
1282         if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1283                             tmp + RWSEM_READER_BIAS)) {
1284             rwsem_set_reader_owned(sem);
1285             return 1;
1286         }
1287     }
1288     return 0;
1289 }
1290 
1291 /*
1292  * lock for writing
1293  */
1294 static inline int __down_write_common(struct rw_semaphore *sem, int state)
1295 {
1296     if (unlikely(!rwsem_write_trylock(sem))) {
1297         if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
1298             return -EINTR;
1299     }
1300 
1301     return 0;
1302 }
1303 
1304 static inline void __down_write(struct rw_semaphore *sem)
1305 {
1306     __down_write_common(sem, TASK_UNINTERRUPTIBLE);
1307 }
1308 
1309 static inline int __down_write_killable(struct rw_semaphore *sem)
1310 {
1311     return __down_write_common(sem, TASK_KILLABLE);
1312 }
1313 
1314 static inline int __down_write_trylock(struct rw_semaphore *sem)
1315 {
1316     DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1317     return rwsem_write_trylock(sem);
1318 }
1319 
1320 /*
1321  * unlock after reading
1322  */
1323 static inline void __up_read(struct rw_semaphore *sem)
1324 {
1325     long tmp;
1326 
1327     DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1328     DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1329 
1330     rwsem_clear_reader_owned(sem);
1331     tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
1332     DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
1333     if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
1334               RWSEM_FLAG_WAITERS)) {
1335         clear_nonspinnable(sem);
1336         rwsem_wake(sem);
1337     }
1338 }
1339 
1340 /*
1341  * unlock after writing
1342  */
1343 static inline void __up_write(struct rw_semaphore *sem)
1344 {
1345     long tmp;
1346 
1347     DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1348     /*
1349      * sem->owner may differ from current if the ownership is transferred
1350      * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
1351      */
1352     DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1353                 !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
1354 
1355     rwsem_clear_owner(sem);
1356     tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
1357     if (unlikely(tmp & RWSEM_FLAG_WAITERS))
1358         rwsem_wake(sem);
1359 }
1360 
1361 /*
1362  * downgrade write lock to read lock
1363  */
1364 static inline void __downgrade_write(struct rw_semaphore *sem)
1365 {
1366     long tmp;
1367 
1368     /*
1369      * When downgrading from exclusive to shared ownership,
1370      * anything inside the write-locked region cannot leak
1371      * into the read side. In contrast, anything in the
1372      * read-locked region is ok to be re-ordered into the
1373      * write side. As such, rely on RELEASE semantics.
1374      */
1375     DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1376     tmp = atomic_long_fetch_add_release(
1377         -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1378     rwsem_set_reader_owned(sem);
1379     if (tmp & RWSEM_FLAG_WAITERS)
1380         rwsem_downgrade_wake(sem);
1381 }
1382 
1383 #else /* !CONFIG_PREEMPT_RT */
1384 
1385 #define RT_MUTEX_BUILD_MUTEX
1386 #include "rtmutex.c"
1387 
1388 #define rwbase_set_and_save_current_state(state)    \
1389     set_current_state(state)
1390 
1391 #define rwbase_restore_current_state()          \
1392     __set_current_state(TASK_RUNNING)
1393 
1394 #define rwbase_rtmutex_lock_state(rtm, state)       \
1395     __rt_mutex_lock(rtm, state)
1396 
1397 #define rwbase_rtmutex_slowlock_locked(rtm, state)  \
1398     __rt_mutex_slowlock_locked(rtm, NULL, state)
1399 
1400 #define rwbase_rtmutex_unlock(rtm)          \
1401     __rt_mutex_unlock(rtm)
1402 
1403 #define rwbase_rtmutex_trylock(rtm)         \
1404     __rt_mutex_trylock(rtm)
1405 
1406 #define rwbase_signal_pending_state(state, current) \
1407     signal_pending_state(state, current)
1408 
1409 #define rwbase_schedule()               \
1410     schedule()
1411 
1412 #include "rwbase_rt.c"
1413 
1414 void __init_rwsem(struct rw_semaphore *sem, const char *name,
1415           struct lock_class_key *key)
1416 {
1417     init_rwbase_rt(&(sem)->rwbase);
1418 
1419 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1420     debug_check_no_locks_freed((void *)sem, sizeof(*sem));
1421     lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
1422 #endif
1423 }
1424 EXPORT_SYMBOL(__init_rwsem);
1425 
1426 static inline void __down_read(struct rw_semaphore *sem)
1427 {
1428     rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1429 }
1430 
1431 static inline int __down_read_interruptible(struct rw_semaphore *sem)
1432 {
1433     return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
1434 }
1435 
1436 static inline int __down_read_killable(struct rw_semaphore *sem)
1437 {
1438     return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
1439 }
1440 
1441 static inline int __down_read_trylock(struct rw_semaphore *sem)
1442 {
1443     return rwbase_read_trylock(&sem->rwbase);
1444 }
1445 
1446 static inline void __up_read(struct rw_semaphore *sem)
1447 {
1448     rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
1449 }
1450 
1451 static inline void __sched __down_write(struct rw_semaphore *sem)
1452 {
1453     rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1454 }
1455 
1456 static inline int __sched __down_write_killable(struct rw_semaphore *sem)
1457 {
1458     return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
1459 }
1460 
1461 static inline int __down_write_trylock(struct rw_semaphore *sem)
1462 {
1463     return rwbase_write_trylock(&sem->rwbase);
1464 }
1465 
1466 static inline void __up_write(struct rw_semaphore *sem)
1467 {
1468     rwbase_write_unlock(&sem->rwbase);
1469 }
1470 
1471 static inline void __downgrade_write(struct rw_semaphore *sem)
1472 {
1473     rwbase_write_downgrade(&sem->rwbase);
1474 }
1475 
1476 /* Debug stubs for the common API */
1477 #define DEBUG_RWSEMS_WARN_ON(c, sem)
1478 
1479 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
1480                         struct task_struct *owner)
1481 {
1482 }
1483 
1484 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
1485 {
1486     int count = atomic_read(&sem->rwbase.readers);
1487 
1488     return count < 0 && count != READER_BIAS;
1489 }
1490 
1491 #endif /* CONFIG_PREEMPT_RT */
1492 
1493 /*
1494  * lock for reading
1495  */
1496 void __sched down_read(struct rw_semaphore *sem)
1497 {
1498     might_sleep();
1499     rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1500 
1501     LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1502 }
1503 EXPORT_SYMBOL(down_read);
1504 
1505 int __sched down_read_interruptible(struct rw_semaphore *sem)
1506 {
1507     might_sleep();
1508     rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1509 
1510     if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
1511         rwsem_release(&sem->dep_map, _RET_IP_);
1512         return -EINTR;
1513     }
1514 
1515     return 0;
1516 }
1517 EXPORT_SYMBOL(down_read_interruptible);
1518 
1519 int __sched down_read_killable(struct rw_semaphore *sem)
1520 {
1521     might_sleep();
1522     rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1523 
1524     if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1525         rwsem_release(&sem->dep_map, _RET_IP_);
1526         return -EINTR;
1527     }
1528 
1529     return 0;
1530 }
1531 EXPORT_SYMBOL(down_read_killable);
1532 
1533 /*
1534  * trylock for reading -- returns 1 if successful, 0 if contention
1535  */
1536 int down_read_trylock(struct rw_semaphore *sem)
1537 {
1538     int ret = __down_read_trylock(sem);
1539 
1540     if (ret == 1)
1541         rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
1542     return ret;
1543 }
1544 EXPORT_SYMBOL(down_read_trylock);
1545 
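/*
 * Usage sketch (hypothetical caller, for illustration only): because the
 * trylock returns 1 on success and 0 on contention, a fallback path is
 * normally required.
 */
static bool rwsem_example_try_read(struct rw_semaphore *sem, const int *val,
                                   int *out)
{
    if (!down_read_trylock(sem))
        return false;   /* contended: caller falls back or retries */
    *out = *val;        /* read the data protected by sem */
    up_read(sem);
    return true;
}
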
1546 /*
1547  * lock for writing
1548  */
1549 void __sched down_write(struct rw_semaphore *sem)
1550 {
1551     might_sleep();
1552     rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1553     LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1554 }
1555 EXPORT_SYMBOL(down_write);
1556 
1557 /*
1558  * lock for writing
1559  */
1560 int __sched down_write_killable(struct rw_semaphore *sem)
1561 {
1562     might_sleep();
1563     rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1564 
1565     if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1566                   __down_write_killable)) {
1567         rwsem_release(&sem->dep_map, _RET_IP_);
1568         return -EINTR;
1569     }
1570 
1571     return 0;
1572 }
1573 EXPORT_SYMBOL(down_write_killable);
1574 
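/*
 * Usage sketch (hypothetical caller, for illustration only): the killable
 * variant lets a fatal signal abort the wait, so the error must be
 * propagated rather than ignored.
 */
static int rwsem_example_update(struct rw_semaphore *sem, int *val, int new_val)
{
    int ret = down_write_killable(sem);

    if (ret)
        return ret;     /* -EINTR: killed while waiting for the lock */
    *val = new_val;     /* write side: exclusive access */
    up_write(sem);
    return 0;
}
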
1575 /*
1576  * trylock for writing -- returns 1 if successful, 0 if contention
1577  */
1578 int down_write_trylock(struct rw_semaphore *sem)
1579 {
1580     int ret = __down_write_trylock(sem);
1581 
1582     if (ret == 1)
1583         rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
1584 
1585     return ret;
1586 }
1587 EXPORT_SYMBOL(down_write_trylock);
1588 
1589 /*
1590  * release a read lock
1591  */
1592 void up_read(struct rw_semaphore *sem)
1593 {
1594     rwsem_release(&sem->dep_map, _RET_IP_);
1595     __up_read(sem);
1596 }
1597 EXPORT_SYMBOL(up_read);
1598 
1599 /*
1600  * release a write lock
1601  */
1602 void up_write(struct rw_semaphore *sem)
1603 {
1604     rwsem_release(&sem->dep_map, _RET_IP_);
1605     __up_write(sem);
1606 }
1607 EXPORT_SYMBOL(up_write);
1608 
1609 /*
1610  * downgrade write lock to read lock
1611  */
1612 void downgrade_write(struct rw_semaphore *sem)
1613 {
1614     lock_downgrade(&sem->dep_map, _RET_IP_);
1615     __downgrade_write(sem);
1616 }
1617 EXPORT_SYMBOL(downgrade_write);
1618 
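/*
 * Usage sketch (hypothetical caller, for illustration only): a writer that
 * has finished updating the data but still needs a stable view can
 * downgrade in place instead of dropping and re-taking the lock, which
 * would let another writer slip in between.
 */
static void rwsem_example_publish(struct rw_semaphore *sem, int *val, int new_val)
{
    down_write(sem);
    *val = new_val;             /* exclusive update */
    downgrade_write(sem);       /* atomically become a reader */
    /* ... read *val here, no writer can change it ... */
    up_read(sem);
}
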
1619 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1620 
1621 void down_read_nested(struct rw_semaphore *sem, int subclass)
1622 {
1623     might_sleep();
1624     rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1625     LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1626 }
1627 EXPORT_SYMBOL(down_read_nested);
1628 
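/*
 * Usage sketch (hypothetical locks and ordering rule, for illustration
 * only): when two rwsems of the same lock class must be read-locked at
 * once, the second acquisition uses a non-zero lockdep subclass such as
 * SINGLE_DEPTH_NESTING so that lockdep does not report a false deadlock.
 */
static void rwsem_example_lock_pair(struct rw_semaphore *a,
                                    struct rw_semaphore *b)
{
    /* assumed ordering rule: 'a' is always taken before 'b' */
    down_read(a);
    down_read_nested(b, SINGLE_DEPTH_NESTING);
    /* ... */
    up_read(b);
    up_read(a);
}
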
1629 int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
1630 {
1631     might_sleep();
1632     rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1633 
1634     if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1635         rwsem_release(&sem->dep_map, _RET_IP_);
1636         return -EINTR;
1637     }
1638 
1639     return 0;
1640 }
1641 EXPORT_SYMBOL(down_read_killable_nested);
1642 
1643 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1644 {
1645     might_sleep();
1646     rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1647     LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1648 }
1649 EXPORT_SYMBOL(_down_write_nest_lock);
1650 
1651 void down_read_non_owner(struct rw_semaphore *sem)
1652 {
1653     might_sleep();
1654     __down_read(sem);
1655     __rwsem_set_reader_owned(sem, NULL);
1656 }
1657 EXPORT_SYMBOL(down_read_non_owner);
1658 
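/*
 * Usage sketch (hypothetical flow, for illustration only): the non-owner
 * variants cover the rare case where the task that takes the read lock is
 * not the task that releases it, so the usual owner bookkeeping and lockdep
 * tracking are skipped.
 */
static void rwsem_example_submit(struct rw_semaphore *sem)
{
    down_read_non_owner(sem);   /* held on behalf of an async operation */
    /* ... hand the work to another context ... */
}

static void rwsem_example_complete(struct rw_semaphore *sem)
{
    up_read_non_owner(sem);     /* released by whichever context finishes */
}
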
1659 void down_write_nested(struct rw_semaphore *sem, int subclass)
1660 {
1661     might_sleep();
1662     rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1663     LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1664 }
1665 EXPORT_SYMBOL(down_write_nested);
1666 
1667 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1668 {
1669     might_sleep();
1670     rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1671 
1672     if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1673                   __down_write_killable)) {
1674         rwsem_release(&sem->dep_map, _RET_IP_);
1675         return -EINTR;
1676     }
1677 
1678     return 0;
1679 }
1680 EXPORT_SYMBOL(down_write_killable_nested);
1681 
1682 void up_read_non_owner(struct rw_semaphore *sem)
1683 {
1684     DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1685     __up_read(sem);
1686 }
1687 EXPORT_SYMBOL(up_read_non_owner);
1688 
1689 #endif