/* SPDX-License-Identifier: GPL-2.0 */
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of the
 * MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair and of having each cpu that
 * tries to acquire the lock spin on a local variable rather than a shared
 * one. It thereby avoids the expensive cache-line bouncing that common
 * test-and-set spin-lock implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
    struct mcs_spinlock *next;
    int locked; /* 1 if lock acquired */
    int count;  /* nesting count, see qspinlock.c */
};

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_cond_load_acquire() provides the acquire semantics
 * required so that subsequent operations happen after the
 * lock is acquired. Additionally, some architectures such as
 * ARM64 can spin-wait (e.g. using the WFE instruction) instead
 * of busy-spinning, and smp_cond_load_acquire() provides that
 * behavior.
 */
#define arch_mcs_spin_lock_contended(l)                 \
do {                                    \
    smp_cond_load_acquire(l, VAL);                  \
} while (0)
#endif
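
/*
 * Where no architecture-specific implementation exists, the generic
 * smp_cond_load_acquire() behaves roughly like the following simplified
 * sketch (an illustration only, not the exact kernel definition):
 *
 *  while (!READ_ONCE(*l))
 *      cpu_relax();
 *  smp_acquire__after_ctrl_dep();
 */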

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)               \
    smp_store_release((l), 1)
#endif

/*
 * Note: on many architectures (x86 being a notable exception), the
 * smp_load_acquire()/smp_store_release() pair is not sufficient to form
 * a full memory barrier across cpus for an mcs_unlock followed by an
 * mcs_lock. Callers that need a full barrier across multiple cpus from
 * an mcs_unlock/mcs_lock pair should use smp_mb__after_unlock_lock()
 * after mcs_lock.
 */

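/*
 * Example (hypothetical caller): to make a remote mcs_spin_unlock()
 * followed by a local mcs_spin_lock() act as a full memory barrier,
 * issue the barrier right after taking the lock:
 *
 *  mcs_spin_lock(&lock, &node);
 *  smp_mb__after_unlock_lock();
 *
 * Accesses made before the remote unlock are then fully ordered
 * against accesses made after this lock on all cpus.
 */
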
/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference to that node to this function in addition to the lock.
 * If the lock has already been acquired, then this will spin on
 * node->locked until the previous lock holder sets node->locked in
 * mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
    struct mcs_spinlock *prev;

    /* Init node */
    node->locked = 0;
    node->next   = NULL;

    /*
     * We rely on the full barrier with global transitivity implied by the
     * below xchg() to order the initialization stores above against any
     * observation of @node, and to provide the ACQUIRE ordering associated
     * with a LOCK primitive.
     */
    prev = xchg(lock, node);
    if (likely(prev == NULL)) {
        /*
         * Lock acquired; there is no need to set node->locked to 1.
         * A thread only spins on its own node->locked value while
         * waiting for the lock, and since this thread acquired the
         * lock immediately, it never spins on its own node->locked
         * and the value won't be used. If a debug mode is needed to
         * audit lock status, set node->locked here.
         */
        return;
    }
    WRITE_ONCE(prev->next, node);

    /* Wait until the lock holder passes the lock down. */
    arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
    struct mcs_spinlock *next = READ_ONCE(node->next);

    if (likely(!next)) {
        /*
         * Release the lock by setting it to NULL.
         */
        if (likely(cmpxchg_release(lock, node, NULL) == node))
            return;
        /* Wait until the next pointer is set */
        while (!(next = READ_ONCE(node->next)))
            cpu_relax();
    }

    /* Pass lock to next waiter. */
    arch_mcs_spin_unlock_contended(&next->locked);
}

#endif /* __LINUX_MCS_SPINLOCK_H */
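
A minimal usage sketch (not part of the file above; the lock pointer and
function names here are hypothetical) looks like this: the caller declares
a node, typically on its stack, and passes it to both the lock and unlock
calls, with preemption disabled as for any spinlock.

    #include "mcs_spinlock.h"

    static struct mcs_spinlock *example_lock;   /* NULL when unlocked */

    static void example_critical_section(void)
    {
        struct mcs_spinlock node;   /* one node per acquisition */

        mcs_spin_lock(&example_lock, &node);
        /* critical section; node must stay live until unlock returns */
        mcs_spin_unlock(&example_lock, &node);
    }

Each acquisition needs its own node, and that node must remain valid until
mcs_spin_unlock() returns, since the unlock path reads node->next to find
the successor; this is one reason users such as qspinlock.c keep per-cpu
node storage rather than relying on arbitrary callers' stacks.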