/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/memblock.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */

#define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET)
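/*
 * _Q_SLOW_VAL keeps the locked byte non-zero (so the lock still reads as
 * held) while being distinct from _Q_LOCKED_VAL; seeing it at unlock time
 * is what tells __pv_queued_spin_unlock() that a pv_node has been hashed
 * and the waiter must be looked up and kicked.
 */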

/*
 * Queue Node Adaptive Spinning
 *
 * A queue node vCPU will stop spinning if the vCPU in the previous node is
 * not running. The one lock stealing attempt allowed at slowpath entry
 * mitigates the slight slowdown that this aggressive wait-early mechanism
 * causes for non-overcommitted guests.
 *
 * The status of the previous node will be checked at a fixed interval
 * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
 * pound on the cacheline of the previous node too heavily.
 */
#define PV_PREV_CHECK_MASK  0xff
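/*
 * With a mask of 0xff, pv_wait_early() samples the previous node's state
 * only on iterations where (loop & 0xff) == 0, i.e. once every 256 spins
 * of the SPIN_THRESHOLD loop in pv_wait_node().
 */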

/*
 * Queue node uses: vcpu_running & vcpu_halted.
 * Queue head uses: vcpu_running & vcpu_hashed.
 */
enum vcpu_state {
    vcpu_running = 0,
    vcpu_halted,        /* Used only in pv_wait_node */
    vcpu_hashed,        /* = pv_hash'ed + vcpu_halted */
};
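
/*
 * Typical transitions: a queue-node vCPU moves itself to vcpu_halted in
 * pv_wait_node() before calling pv_wait(); the lock holder's pv_kick_node()
 * may promote vcpu_halted to vcpu_hashed; the queue head resets itself to
 * vcpu_running in pv_wait_head_or_lock() and sets vcpu_hashed once it has
 * hashed the lock.
 */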

struct pv_node {
    struct mcs_spinlock mcs;
    int         cpu;
    u8          state;
};

/*
 * Hybrid PV queued/unfair lock
 *
 * The function below replaces the regular queued_spin_trylock(); it is
 * called once when a lock waiter enters the PV slowpath before being
 * queued.
 *
 * The pending bit is set by the queue head vCPU of the MCS wait queue in
 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
 * When that bit becomes visible to the incoming waiters, no lock stealing
 * is allowed. The function will return immediately to make the waiters
 * enter the MCS wait queue. So lock starvation shouldn't happen as long
 * as the queued mode vCPUs are actively running to set the pending bit
 * and hence disable lock stealing.
 *
 * When the pending bit isn't set, the lock waiters will stay in the unfair
 * mode spinning on the lock unless the MCS wait queue is empty. In this
 * case, the lock waiters will enter the queued mode slowpath trying to
 * become the queue head and set the pending bit.
 *
 * This hybrid PV queued/unfair lock combines the best attributes of a
 * queued lock (no lock starvation) and an unfair lock (good performance
 * on not heavily contended locks).
 */
#define queued_spin_trylock(l)  pv_hybrid_queued_unfair_trylock(l)
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
    /*
     * Stay in unfair lock mode as long as queued mode waiters are
     * present in the MCS wait queue but the pending bit isn't set.
     */
    for (;;) {
        int val = atomic_read(&lock->val);

        if (!(val & _Q_LOCKED_PENDING_MASK) &&
           (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
            lockevent_inc(pv_lock_stealing);
            return true;
        }
        if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
            break;

        cpu_relax();
    }

    return false;
}

/*
 * The pending bit is used by the queue head vCPU to indicate that it
 * is actively spinning on the lock and no lock stealing is allowed.
 */
#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
    WRITE_ONCE(lock->pending, 1);
}

/*
 * The pending bit check in pv_hybrid_queued_unfair_trylock() isn't a memory
 * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
 * lock, to be sure that it will get it.
 */
static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
    return !READ_ONCE(lock->locked) &&
           (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
                _Q_LOCKED_VAL) == _Q_PENDING_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
    atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
    int val = atomic_read(&lock->val);

    for (;;) {
        int old, new;

        if (val & _Q_LOCKED_MASK)
            break;

        /*
         * Try to clear pending bit & set locked bit
         */
        old = val;
        new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
        val = atomic_cmpxchg_acquire(&lock->val, old, new);

        if (val == old)
            return 1;
    }
    return 0;
}
#endif /* _Q_PENDING_BITS == 8 */
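
/*
 * Both variants implement the same two operations: set_pending() raises the
 * pending bit and trylock_clear_pending() atomically turns a pending-but-
 * unlocked lock word into a locked one. The _Q_PENDING_BITS == 8 version
 * can operate directly on the locked/pending bytes; the fallback uses
 * atomic operations on the whole lock word.
 */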

/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done on page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed) the
 * max load factor is 0.75, which is around the point where open addressing
 * breaks down.
 */
struct pv_hash_entry {
    struct qspinlock *lock;
    struct pv_node   *node;
};

#define PV_HE_PER_LINE  (SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN   (PAGE_SIZE / sizeof(struct pv_hash_entry))
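
/*
 * Example sizing (assuming 64-byte cache lines, 4 KiB pages and 64-bit
 * pointers): sizeof(struct pv_hash_entry) == 16, so PV_HE_PER_LINE == 4 and
 * PV_HE_MIN == 256. A 1024-CPU system thus asks for 4 * 1024 = 4096 entries,
 * i.e. a 64 KiB hash table.
 */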

static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
    int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

    if (pv_hash_size < PV_HE_MIN)
        pv_hash_size = PV_HE_MIN;

    /*
     * Allocate space from bootmem which should be page-size aligned
     * and hence cacheline aligned.
     */
    pv_lock_hash = alloc_large_system_hash("PV qspinlock",
                           sizeof(struct pv_hash_entry),
                           pv_hash_size, 0,
                           HASH_EARLY | HASH_ZERO,
                           &pv_lock_hash_bits, NULL,
                           pv_hash_size, pv_hash_size);
}

#define for_each_hash_entry(he, offset, hash)                       \
    for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;   \
         offset < (1 << pv_lock_hash_bits);                     \
         offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
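
/*
 * The probe sequence starts at the cacheline-aligned group of entries
 * selected by hash_ptr(lock) (hash is rounded down to a multiple of
 * PV_HE_PER_LINE) and then walks the table linearly, wrapping modulo the
 * table size; pv_hash() stops at the first free entry, pv_unhash() at the
 * matching one.
 */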

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
    unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
    struct pv_hash_entry *he;
    int hopcnt = 0;

    for_each_hash_entry(he, offset, hash) {
        hopcnt++;
        if (!cmpxchg(&he->lock, NULL, lock)) {
            WRITE_ONCE(he->node, node);
            lockevent_pv_hop(hopcnt);
            return &he->lock;
        }
    }
    /*
     * Hard assume there is a free entry for us.
     *
     * This is guaranteed by ensuring every blocked lock only ever consumes
     * a single entry, and since we only have 4 nesting levels per CPU
     * and allocated 4 * num_possible_cpus() entries, this must be so.
     *
     * The single entry is guaranteed by having the lock owner unhash
     * before it releases.
     */
    BUG();
}
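
/*
 * The "4 nesting levels" above correspond to the per-CPU MCS node array in
 * qspinlock.c, which provides one node per context (task, softirq, hardirq,
 * NMI), so a CPU can be blocked on at most 4 locks at any time.
 */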

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
    unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
    struct pv_hash_entry *he;
    struct pv_node *node;

    for_each_hash_entry(he, offset, hash) {
        if (READ_ONCE(he->lock) == lock) {
            node = READ_ONCE(he->node);
            WRITE_ONCE(he->lock, NULL);
            return node;
        }
    }
    /*
     * Hard assume we'll find an entry.
     *
     * This guarantees a limited lookup time and is itself guaranteed by
     * having the lock owner do the unhash -- IFF the unlock sees the
     * SLOW flag, there MUST be a hash entry.
     */
    BUG();
}

/*
 * Return true when it is time to check the previous node and that node is
 * not in a running state.
 */
static inline bool
pv_wait_early(struct pv_node *prev, int loop)
{
    if ((loop & PV_PREV_CHECK_MASK) != 0)
        return false;

    return READ_ONCE(prev->state) != vcpu_running;
}

/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
    struct pv_node *pn = (struct pv_node *)node;

    BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));

    pn->cpu = smp_processor_id();
    pn->state = vcpu_running;
}

/*
 * Wait for node->locked to become true; halt the vcpu after a short spin.
 * pv_kick_node() is used to set _Q_SLOW_VAL and fill in the hash table on
 * its behalf.
 */
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
    struct pv_node *pn = (struct pv_node *)node;
    struct pv_node *pp = (struct pv_node *)prev;
    int loop;
    bool wait_early;

    for (;;) {
        for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
            if (READ_ONCE(node->locked))
                return;
            if (pv_wait_early(pp, loop)) {
                wait_early = true;
                break;
            }
            cpu_relax();
        }

        /*
         * Order pn->state vs pn->locked thusly:
         *
         * [S] pn->state = vcpu_halted    [S] next->locked = 1
         *     MB                             MB
         * [L] pn->locked                 [RmW] pn->state = vcpu_hashed
         *
         * Matches the cmpxchg() from pv_kick_node().
         */
        smp_store_mb(pn->state, vcpu_halted);

        if (!READ_ONCE(node->locked)) {
            lockevent_inc(pv_wait_node);
            lockevent_cond_inc(pv_wait_early, wait_early);
            pv_wait(&pn->state, vcpu_halted);
        }

        /*
         * If pv_kick_node() changed us to vcpu_hashed, retain that
         * value so that pv_wait_head_or_lock() knows to not also try
         * to hash this lock.
         */
        cmpxchg(&pn->state, vcpu_halted, vcpu_running);

        /*
         * If the locked flag is still not set after wakeup, it is a
         * spurious wakeup and the vCPU should wait again. However,
         * there is a pretty high overhead for CPU halting and kicking.
         * So it is better to spin for a while in the hope that the
         * MCS lock will be released soon.
         */
        lockevent_cond_inc(pv_spurious_wakeup,
                  !READ_ONCE(node->locked));
    }

    /*
     * By now our node->locked should be 1 and our caller will not actually
     * spin-wait for it. We do however rely on our caller to do a
     * load-acquire for us.
     */
}

/*
 * Called after setting next->locked = 1 when we're the lock owner.
 *
 * Instead of waking the waiters stuck in pv_wait_node(), advance their state
 * so that they're waiting in pv_wait_head_or_lock(); this avoids a
 * wake/sleep cycle.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
    struct pv_node *pn = (struct pv_node *)node;

    /*
     * If the vCPU is indeed halted, advance its state to match that of
     * pv_wait_node(). If OTOH this fails, the vCPU was running and will
     * observe its next->locked value and advance itself.
     *
     * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
     *
     * The write to next->locked in arch_mcs_spin_unlock_contended()
     * must be ordered before the read of pn->state in the cmpxchg()
     * below for the code to work correctly. To guarantee full ordering
     * irrespective of the success or failure of the cmpxchg(),
     * a relaxed version with explicit barrier is used. The control
     * dependency will order the reading of pn->state before any
     * subsequent writes.
     */
    smp_mb__before_atomic();
    if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
        != vcpu_halted)
        return;

    /*
     * Put the lock into the hash table and set the _Q_SLOW_VAL.
     *
     * As this is the same vCPU that will check the _Q_SLOW_VAL value and
     * the hash table later on at unlock time, no atomic instruction is
     * needed.
     */
    WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
    (void)pv_hash(lock, pn);
}

/*
 * Wait for l->locked to become clear and acquire the lock;
 * halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 *
 * The current value of the lock will be returned for additional processing.
 */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
    struct pv_node *pn = (struct pv_node *)node;
    struct qspinlock **lp = NULL;
    int waitcnt = 0;
    int loop;

    /*
     * If pv_kick_node() already advanced our state, we don't need to
     * insert ourselves into the hash table anymore.
     */
    if (READ_ONCE(pn->state) == vcpu_hashed)
        lp = (struct qspinlock **)1;

    /*
     * Tracking # of slowpath locking operations
     */
    lockevent_inc(lock_slowpath);

    for (;; waitcnt++) {
        /*
         * Set correct vCPU state to be used by queue node wait-early
         * mechanism.
         */
        WRITE_ONCE(pn->state, vcpu_running);

        /*
         * Set the pending bit in the active lock spinning loop to
         * disable lock stealing before attempting to acquire the lock.
         */
        set_pending(lock);
        for (loop = SPIN_THRESHOLD; loop; loop--) {
            if (trylock_clear_pending(lock))
                goto gotlock;
            cpu_relax();
        }
        clear_pending(lock);

        if (!lp) { /* ONCE */
            lp = pv_hash(lock, pn);

            /*
             * We must hash before setting _Q_SLOW_VAL, such that
             * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
             * we'll be sure to be able to observe our hash entry.
             *
             *   [S] <hash>                 [RmW] l->locked == _Q_SLOW_VAL
             *       MB                           RMB
             * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
             *
             * Matches the smp_rmb() in __pv_queued_spin_unlock().
             */
            if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
                /*
                 * The lock was free and now we own the lock.
                 * Change the lock value back to _Q_LOCKED_VAL
                 * and unhash the table.
                 */
                WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
                WRITE_ONCE(*lp, NULL);
                goto gotlock;
            }
        }
        WRITE_ONCE(pn->state, vcpu_hashed);
        lockevent_inc(pv_wait_head);
        lockevent_cond_inc(pv_wait_again, waitcnt);
        pv_wait(&lock->locked, _Q_SLOW_VAL);

        /*
         * Because of lock stealing, the queue head vCPU may not be
         * able to acquire the lock before it has to wait again.
         */
    }

    /*
     * The cmpxchg() or xchg() call before coming here provides the
     * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
     * here is to indicate to the compiler that the value will always
     * be nonzero to enable better code optimization.
     */
gotlock:
    return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}

/*
 * PV versions of the unlock fastpath and slowpath functions to be used
 * instead of queued_spin_unlock().
 */
__visible void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
    struct pv_node *node;

    if (unlikely(locked != _Q_SLOW_VAL)) {
        WARN(!debug_locks_silent,
             "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
             (unsigned long)lock, atomic_read(&lock->val));
        return;
    }

    /*
     * A failed cmpxchg doesn't provide any memory-ordering guarantees,
     * so we need a barrier to order the read of the node data in
     * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
     *
     * Matches the xchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
     */
    smp_rmb();

    /*
     * Since the above failed to release, this must be the SLOW path.
     * Therefore start by looking up the blocked node and unhashing it.
     */
    node = pv_unhash(lock);

    /*
     * Now that we have a reference to the (likely) blocked pv_node,
     * release the lock.
     */
    smp_store_release(&lock->locked, 0);

    /*
     * At this point the memory pointed at by lock can be freed/reused,
     * however we can still use the pv_node to kick the CPU.
     * The other vCPU may not really be halted, but kicking an active
     * vCPU is harmless other than the additional latency in completing
     * the unlock.
     */
    lockevent_inc(pv_kick_unlock);
    pv_kick(node->cpu);
}

/*
 * Include the architecture specific callee-save thunk of the
 * __pv_queued_spin_unlock(). This thunk is put together with
 * __pv_queued_spin_unlock() to keep the callee-save thunk and the real
 * unlock function close to each other, sharing consecutive instruction
 * cachelines. Alternatively, an architecture specific version of
 * __pv_queued_spin_unlock() can be defined.
 */
#include <asm/qspinlock_paravirt.h>

#ifndef __pv_queued_spin_unlock
__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
    u8 locked;

    /*
     * We must not unlock if SLOW, because in that case we must first
     * unhash. Otherwise it would be possible to have multiple @lock
     * entries, which would be BAD.
     */
    locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
    if (likely(locked == _Q_LOCKED_VAL))
        return;

    __pv_queued_spin_unlock_slowpath(lock, locked);
}
#endif /* __pv_queued_spin_unlock */