// SPDX-License-Identifier: GPL-2.0

/*
 * Hyper-V specific spinlock code.
 *
 * Copyright (C) 2018, Intel, Inc.
 *
 * Author : Yi Sun <yi.y.sun@intel.com>
 */

#define pr_fmt(fmt) "Hyper-V: " fmt

#include <linux/spinlock.h>

#include <asm/mshyperv.h>
#include <asm/paravirt.h>
#include <asm/apic.h>

static bool __initdata hv_pvspin = true;

/*
 * Wake up a vCPU that is parked in hv_qlock_wait(): the IPI terminates
 * the 'idle' state entered via the HV_X64_MSR_GUEST_IDLE read, even if
 * the target vCPU has interrupts disabled.
 */
static void hv_qlock_kick(int cpu)
{
    apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
}

static void hv_qlock_wait(u8 *byte, u8 val)
{
    unsigned long flags;

    if (in_nmi())
        return;

    /*
     * Reading the HV_X64_MSR_GUEST_IDLE MSR tells the hypervisor that
     * the vCPU can be put into 'idle' state. This 'idle' state is
     * terminated by an IPI, usually from hv_qlock_kick(), even if
     * interrupts are disabled on the vCPU.
     *
     * To prevent a race against the unlock path, interrupts must be
     * disabled before accessing the HV_X64_MSR_GUEST_IDLE MSR.
     * Otherwise, if the IPI from hv_qlock_kick() arrives between the
     * lock value check and the rdmsrl(), the vCPU might be put into
     * 'idle' state by the hypervisor and kept there for an unspecified
     * amount of time.
     */
    local_irq_save(flags);
    /*
     * Only issue the rdmsrl() when the lock state has not changed.
     */
    if (READ_ONCE(*byte) == val) {
        unsigned long msr_val;

        rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);

        /* The value read is irrelevant; the read itself is the signal. */
        (void)msr_val;
    }
    local_irq_restore(flags);
}

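/*
 * For orientation: a rough sketch of how the paravirt qspinlock
 * slowpath drives the two callbacks above (simplified from
 * kernel/locking/qspinlock_paravirt.h; control flow is approximate,
 * not the exact upstream logic):
 *
 *    // Waiter side, spinning for a contended lock:
 *    for (;;) {
 *        if (trylock/check succeeds)
 *            break;
 *        pv_wait(&lock->locked, _Q_SLOW_VAL);  // -> hv_qlock_wait()
 *    }
 *
 *    // Unlocker side, releasing a lock that has waiters:
 *    smp_store_release(&lock->locked, 0);
 *    pv_kick(node->cpu);                       // -> hv_qlock_kick()
 */
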
/*
 * Hyper-V so far provides no interface to query whether a vCPU has
 * been preempted, so always report 'not preempted'.
 */
__visible bool hv_vcpu_is_preempted(int vcpu)
{
    return false;
}
PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);

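/*
 * Note: PV_CALLEE_SAVE_REGS_THUNK() emits an assembly wrapper
 * (__raw_callee_save_hv_vcpu_is_preempted) that saves and restores the
 * caller-clobbered registers around the C function, so the patched-in
 * pv_ops call site only needs a minimal clobber list.
 */
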
void __init hv_init_spinlocks(void)
{
    if (!hv_pvspin || !apic ||
        !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
        !(ms_hyperv.features & HV_MSR_GUEST_IDLE_AVAILABLE)) {
        pr_info("PV spinlocks disabled\n");
        return;
    }
    pr_info("PV spinlocks enabled\n");

    __pv_init_lock_hash();
    pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
    pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
    pv_ops.lock.wait = hv_qlock_wait;
    pv_ops.lock.kick = hv_qlock_kick;
    pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
}

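/*
 * Call-site note: hv_init_spinlocks() must run after ms_hyperv.features
 * and ms_hyperv.hints have been populated; in mainline it is invoked
 * from the Hyper-V platform setup code (hv_smp_prepare_boot_cpu() in
 * arch/x86/kernel/cpu/mshyperv.c, gated on CONFIG_PARAVIRT_SPINLOCKS).
 */
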
/*
 * "hv_nopvspin" takes no argument; its mere presence on the kernel
 * command line disables PV spinlocks.
 */
static __init int hv_parse_nopvspin(char *arg)
{
    hv_pvspin = false;
    return 0;
}
early_param("hv_nopvspin", hv_parse_nopvspin);
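
/*
 * Usage example (hypothetical command line): append the parameter to
 * the guest's kernel command line to fall back to native qspinlocks:
 *
 *    linux /boot/vmlinuz ... hv_nopvspin
 *
 * Boot will then log "Hyper-V: PV spinlocks disabled" (pr_fmt adds the
 * "Hyper-V: " prefix).
 */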