// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <trace/events/lock.h>

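/*
 * For reference only: a sketch of the lock-word layout that the
 * slowpaths below manipulate, paraphrasing include/asm-generic/qrwlock.h
 * (the authoritative definitions live there and are pulled in via
 * <linux/spinlock.h>; they are reproduced here as a comment so nothing
 * is redefined):
 *
 *	#define _QW_LOCKED	0x0ff	- a writer holds the lock
 *	#define _QW_WAITING	0x100	- a writer is waiting
 *	#define _QW_WMASK	0x1ff	- writer mode mask
 *	#define _QR_SHIFT	9	- reader count shift
 *	#define _QR_BIAS	(1U << _QR_SHIFT)
 *
 * Bits 0-7 form the writer-locked byte, bit 8 is the writer-waiting
 * flag, and bits 9 and up count active readers, so adding _QR_BIAS
 * bumps the reader count without disturbing the writer bits.
 */
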
/**
 * queued_read_lock_slowpath - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
    /*
     * Readers come here when they cannot get the lock without waiting
     */
    if (unlikely(in_interrupt())) {
        /*
         * Readers in interrupt context will get the lock immediately
         * if the writer is just waiting (not holding the lock yet),
         * so spin with ACQUIRE semantics until the lock is available
         * without waiting in the queue.
         */
        atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
        return;
    }
    atomic_sub(_QR_BIAS, &lock->cnts);

    trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ);

    /*
     * Put the reader into the wait queue
     */
    arch_spin_lock(&lock->wait_lock);
    atomic_add(_QR_BIAS, &lock->cnts);

    /*
     * The ACQUIRE semantics of the following spinning code ensure
     * that accesses can't leak upwards out of our subsequent critical
     * section in the case that the lock is currently held for write.
     */
    atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

    /*
     * Signal the next one in queue to become queue head
     */
    arch_spin_unlock(&lock->wait_lock);

    trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);

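/*
 * Illustration only: a sketch of the fastpath that funnels into the
 * slowpath above. The real entry point is queued_read_lock() in
 * include/asm-generic/qrwlock.h; the name example_queued_read_lock()
 * below is hypothetical and its body paraphrases that header.
 */
static __always_inline void example_queued_read_lock(struct qrwlock *lock)
{
    int cnts;

    /* Optimistically bump the reader count. */
    cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);

    /* No writer holds or waits for the lock: the read lock is ours. */
    if (likely(!(cnts & _QW_WMASK)))
        return;

    /* Contended: the slowpath re-balances the bias and queues us. */
    queued_read_lock_slowpath(lock);
}
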
/**
 * queued_write_lock_slowpath - acquire write lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
    int cnts;

    trace_contention_begin(lock, LCB_F_SPIN | LCB_F_WRITE);

    /* Put the writer into the wait queue */
    arch_spin_lock(&lock->wait_lock);

    /* Try to acquire the lock directly if no reader is present */
    if (!(cnts = atomic_read(&lock->cnts)) &&
        atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))
        goto unlock;

    /* Set the waiting flag to notify readers that a writer is pending */
    atomic_or(_QW_WAITING, &lock->cnts);

    /* When no more readers or writers, set the locked flag */
    do {
        cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
    } while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
    arch_spin_unlock(&lock->wait_lock);

    trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
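
/*
 * Illustration only: a sketch of the matching write-lock fastpath,
 * modelled on queued_write_lock() in include/asm-generic/qrwlock.h. The
 * name example_queued_write_lock() is hypothetical; real callers go
 * through the generic write_lock()/write_unlock() wrappers, which
 * compile down to this fastpath/slowpath pair on architectures that
 * select queued rwlocks.
 */
static __always_inline void example_queued_write_lock(struct qrwlock *lock)
{
    int cnts = 0;

    /* Uncontended case: swing the word from 0 to _QW_LOCKED at once. */
    if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
        return;

    /* Readers or another writer present: take the queued slowpath. */
    queued_write_lock_slowpath(lock);
}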