/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * A stand-alone ticket spinlock implementation for use by the non-VHE
 * KVM hypervisor code running at EL2.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 *
 * Heavily based on the implementation removed by c11090474d70 which was:
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ARM64_KVM_NVHE_SPINLOCK_H__
#define __ARM64_KVM_NVHE_SPINLOCK_H__

#include <asm/alternative.h>
#include <asm/lse.h>
#include <asm/rwonce.h>

typedef union hyp_spinlock {
	u32	__val;
	struct {
#ifdef __AARCH64EB__
		u16 next, owner;
#else
		u16 owner, next;
#endif
	};
} hyp_spinlock_t;
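
/*
 * Note that the field order flips with endianness so that 'next' always
 * occupies the upper 16 bits of __val; hyp_spin_lock() below relies on
 * this when it takes a ticket with a single 32-bit add of (1 << 16).
 */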

#define hyp_spin_lock_init(l)						\
do {									\
	*(l) = (hyp_spinlock_t){ .__val = 0 };				\
} while (0)
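
/*
 * Zero-initialisation leaves owner == next, so a freshly initialised lock
 * reads back as unlocked from hyp_spin_is_locked().
 */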

static inline void hyp_spin_lock(hyp_spinlock_t *lock)
{
	u32 tmp;
	hyp_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, #(1 << 16)\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, #(1 << 16)\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3))

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner)
	: "memory");
}
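
/*
 * In rough pseudo-C (illustrative helpers, not the kernel API), the acquire
 * sequence above boils down to the usual ticket-lock pattern, with the
 * ordering provided by the acquire semantics of ldaxr/ldadda and ldaxrh:
 *
 *	lockval.__val = fetch_add_acquire(&lock->__val, 1 << 16); // grab a ticket
 *	while (lockval.owner != lockval.next)                     // not our turn yet
 *		lockval.owner = load_acquire(&lock->owner);       // wfe, then re-check
 */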

static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
{
	u64 tmp;

	asm volatile(
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}
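
/*
 * The release side simply bumps the owner ticket with release ordering;
 * only the lock holder ever writes 'owner', which is why the LL/SC variant
 * can use a plain load followed by a store-release rather than an atomic
 * read-modify-write. In pseudo-C:
 *
 *	store_release(&lock->owner, lock->owner + 1);
 */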

static inline bool hyp_spin_is_locked(hyp_spinlock_t *lock)
{
	hyp_spinlock_t lockval = READ_ONCE(*lock);

	return lockval.owner != lockval.next;
}

#ifdef CONFIG_NVHE_EL2_DEBUG
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock)
{
	/*
	 * The __pkvm_init() path accesses protected data-structures without
	 * holding locks as the other CPUs are guaranteed to not enter EL2
	 * concurrently at this point in time. The point by which EL2 is
	 * initialized on all CPUs is reflected in the pkvm static key, so
	 * wait until it is set before checking the lock state.
	 */
	if (static_branch_likely(&kvm_protected_mode_initialized))
		BUG_ON(!hyp_spin_is_locked(lock));
}
#else
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock) { }
#endif
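
/*
 * Minimal usage sketch; the hyp_example_* names below are made up purely
 * for illustration:
 *
 *	static hyp_spinlock_t hyp_example_lock;
 *	static unsigned long hyp_example_counter;
 *
 *	static void __hyp_example_update(void)
 *	{
 *		hyp_assert_lock_held(&hyp_example_lock);
 *		hyp_example_counter++;
 *	}
 *
 *	static void hyp_example_inc(void)
 *	{
 *		hyp_spin_lock(&hyp_example_lock);
 *		__hyp_example_update();
 *		hyp_spin_unlock(&hyp_example_lock);
 *	}
 *
 * The lock must be initialised with hyp_spin_lock_init() (or zero-initialised)
 * before first use.
 */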

#endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */