Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Split spinlock implementation out into its own file, so it can be
0004  * compiled in a FTRACE-compatible way.
0005  */
0006 #include <linux/spinlock.h>
0007 #include <linux/export.h>
0008 #include <linux/jump_label.h>
0009 
0010 #include <asm/paravirt.h>
0011 
/*
 * Plain C wrapper around the native qspinlock unlock, marked __visible so
 * the symbol survives for the assembly thunk generated below.
 */
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
/*
 * Emit __raw_callee_save___native_queued_spin_unlock, a register-preserving
 * entry point suitable for use as a PV_CALLEE_SAVE op in pv_ops.
 */
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
0017 
0018 bool pv_is_native_spin_unlock(void)
0019 {
0020     return pv_ops.lock.queued_spin_unlock.func ==
0021         __raw_callee_save___native_queued_spin_unlock;
0022 }
0023 
/*
 * Native (bare-metal) vcpu_is_preempted: a physical CPU is never
 * preempted by a hypervisor, so always report false.
 */
__visible bool __native_vcpu_is_preempted(long cpu)
{
	return false;
}
/*
 * Emit __raw_callee_save___native_vcpu_is_preempted for use as the
 * default callee-save pv op.
 */
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
0029 
0030 bool pv_is_native_vcpu_is_preempted(void)
0031 {
0032     return pv_ops.lock.vcpu_is_preempted.func ==
0033         __raw_callee_save___native_vcpu_is_preempted;
0034 }
0035 
0036 void __init paravirt_set_cap(void)
0037 {
0038     if (!pv_is_native_spin_unlock())
0039         setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);
0040 
0041     if (!pv_is_native_vcpu_is_preempted())
0042         setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
0043 }