Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Split spinlock implementation out into its own file, so it can be
0004  * compiled in a FTRACE-compatible way.
0005  */
0006 #include <linux/kernel.h>
0007 #include <linux/spinlock.h>
0008 #include <linux/slab.h>
0009 #include <linux/atomic.h>
0010 
0011 #include <asm/paravirt.h>
0012 #include <asm/qspinlock.h>
0013 
0014 #include <xen/events.h>
0015 
0016 #include "xen-ops.h"
0017 
/* Per-CPU kicker IPI irq; -1 means not (yet) bound — checked before use. */
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
/* kasprintf()'d irq name passed to bind_ipi_to_irqhandler(); freed on unbind. */
static DEFINE_PER_CPU(char *, irq_name);
/* Nesting depth of xen_qlock_wait() (it can re-enter; see "Detect reentry"). */
static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
/* Cleared when PV spinlocks are disabled (single vCPU or nopvspin). */
static bool xen_pvspin = true;
0022 
0023 static void xen_qlock_kick(int cpu)
0024 {
0025     int irq = per_cpu(lock_kicker_irq, cpu);
0026 
0027     /* Don't kick if the target's kicker interrupt is not initialized. */
0028     if (irq == -1)
0029         return;
0030 
0031     xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
0032 }
0033 
/*
 * Halt the current CPU & release it back to the host
 *
 * pv qspinlock "wait" callback: block until the kicker IPI becomes
 * pending instead of burning cycles spinning on *byte.  May be
 * re-entered (the nest counter guards against clearing a pending
 * kick that an outer invocation is still waiting for).
 */
static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);
	atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1 || in_nmi())
		return;

	/* Detect reentry. */
	atomic_inc(nest_cnt);

	/* If irq pending already and no nested call clear it. */
	if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
		xen_clear_irq_pending(irq);
	} else if (READ_ONCE(*byte) == val) {
		/* Block until irq becomes pending (or a spurious wakeup) */
		xen_poll_irq(irq);
	}

	atomic_dec(nest_cnt);
}
0059 
/*
 * Handler for the kicker IPI.  The irq is disabled right after binding
 * (see xen_init_lock_cpu()) and is only ever polled/cleared, never
 * delivered — so actually entering this handler indicates a bug.
 */
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}
0065 
0066 void xen_init_lock_cpu(int cpu)
0067 {
0068     int irq;
0069     char *name;
0070 
0071     if (!xen_pvspin)
0072         return;
0073 
0074     WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
0075          cpu, per_cpu(lock_kicker_irq, cpu));
0076 
0077     name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
0078     irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
0079                      cpu,
0080                      dummy_handler,
0081                      IRQF_PERCPU|IRQF_NOBALANCING,
0082                      name,
0083                      NULL);
0084 
0085     if (irq >= 0) {
0086         disable_irq(irq); /* make sure it's never delivered */
0087         per_cpu(lock_kicker_irq, cpu) = irq;
0088         per_cpu(irq_name, cpu) = name;
0089     }
0090 
0091     printk("cpu %d spinlock event irq %d\n", cpu, irq);
0092 }
0093 
0094 void xen_uninit_lock_cpu(int cpu)
0095 {
0096     int irq;
0097 
0098     if (!xen_pvspin)
0099         return;
0100 
0101     /*
0102      * When booting the kernel with 'mitigations=auto,nosmt', the secondary
0103      * CPUs are not activated, and lock_kicker_irq is not initialized.
0104      */
0105     irq = per_cpu(lock_kicker_irq, cpu);
0106     if (irq == -1)
0107         return;
0108 
0109     unbind_from_irqhandler(irq, NULL);
0110     per_cpu(lock_kicker_irq, cpu) = -1;
0111     kfree(per_cpu(irq_name, cpu));
0112     per_cpu(irq_name, cpu) = NULL;
0113 }
0114 
0115 PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
0116 
/*
 * Our init of PV spinlocks is split in two init functions due to us
 * using paravirt patching and jump labels patching and having to do
 * all of this before SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{
	/*  Don't need to use pvqspinlock code if there is only 1 vCPU. */
	if (num_possible_cpus() == 1 || nopvspin)
		xen_pvspin = false;

	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		/* Fall back to the native qspinlock (no PV, no test-and-set). */
		static_branch_disable(&virt_spin_lock_key);
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");

	/* Set up the hash used by __pv_queued_spin_lock_slowpath(). */
	__pv_init_lock_hash();
	/* Route the qspinlock slow paths through the Xen wait/kick helpers. */
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = xen_qlock_wait;
	pv_ops.lock.kick = xen_qlock_kick;
	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}
0146 
/*
 * Legacy "xen_nopvspin" command-line parameter: disables PV spinlocks,
 * like the generic "nopvspin" parameter it has been superseded by.
 */
static __init int xen_parse_nopvspin(char *arg)
{
	pr_notice("\"xen_nopvspin\" is deprecated, please use \"nopvspin\" instead\n");
	xen_pvspin = false;
	return 0;	/* parameter handled */
}
early_param("xen_nopvspin", xen_parse_nopvspin);
0154