// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V specific spinlock code.
 *
 * Implements the paravirt qspinlock hooks (wait/kick/vcpu_is_preempted)
 * on top of the Hyper-V guest-idle MSR and IPIs.
 */
0011 #define pr_fmt(fmt) "Hyper-V: " fmt
0012
0013 #include <linux/spinlock.h>
0014
0015 #include <asm/mshyperv.h>
0016 #include <asm/paravirt.h>
0017 #include <asm/apic.h>
0018
/* PV spinlocks enabled by default; cleared by the "hv_nopvspin" early param. */
static bool __initdata hv_pvspin = true;
0020
/*
 * Kick callback for the paravirt qspinlock: send an IPI to @cpu to wake
 * a vCPU that may be idling in hv_qlock_wait().
 */
static void hv_qlock_kick(int cpu)
{
	apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
}
0025
/*
 * Wait callback for the paravirt qspinlock slow path: idle this vCPU
 * until the lock byte at @byte changes away from @val or an IPI
 * (normally the kick from hv_qlock_kick()) arrives.
 */
static void hv_qlock_wait(u8 *byte, u8 val)
{
	unsigned long flags;

	/* Idling the vCPU from NMI context is not safe; let the caller spin. */
	if (in_nmi())
		return;

	/*
	 * Reading the HV_X64_MSR_GUEST_IDLE MSR tells the hypervisor that
	 * this vCPU may be placed into an idle state, from which an IPI
	 * wakes it — presumably per the Hyper-V TLFS guest-idle
	 * definition; TODO confirm against the spec.
	 *
	 * NOTE(review): interrupts are disabled here apparently to keep
	 * the lock-byte check below ordered before the MSR read, so a
	 * kick arriving in between is not consumed early — confirm
	 * against the unlock/kick path.
	 */
	local_irq_save(flags);

	/* Only issue the rdmsrl() when the lock state has not changed. */
	if (READ_ONCE(*byte) == val) {
		unsigned long msr_val;

		rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);

		/* The MSR value itself is irrelevant; the read is the side effect. */
		(void)msr_val;
	}
	local_irq_restore(flags);
}
0059
0060
0061
0062
/*
 * vcpu_is_preempted hook: always reports "not preempted". No preemption
 * information from the hypervisor is consulted here.
 */
__visible bool hv_vcpu_is_preempted(int vcpu)
{
	return false;
}
/* Generate the callee-saved-registers thunk required for pv_ops. */
PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);
0068
0069 void __init hv_init_spinlocks(void)
0070 {
0071 if (!hv_pvspin || !apic ||
0072 !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
0073 !(ms_hyperv.features & HV_MSR_GUEST_IDLE_AVAILABLE)) {
0074 pr_info("PV spinlocks disabled\n");
0075 return;
0076 }
0077 pr_info("PV spinlocks enabled\n");
0078
0079 __pv_init_lock_hash();
0080 pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
0081 pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
0082 pv_ops.lock.wait = hv_qlock_wait;
0083 pv_ops.lock.kick = hv_qlock_kick;
0084 pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
0085 }
0086
/* "hv_nopvspin" command-line parameter: opt out of PV spinlocks. */
static __init int hv_parse_nopvspin(char *arg)
{
	hv_pvspin = false;
	return 0;
}
early_param("hv_nopvspin", hv_parse_nopvspin);