Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Copyright 2016 IBM Corporation.
0004  */
0005 #include <linux/types.h>
0006 #include <linux/kernel.h>
0007 #include <linux/irq.h>
0008 #include <linux/smp.h>
0009 #include <linux/interrupt.h>
0010 #include <linux/irqdomain.h>
0011 #include <linux/cpu.h>
0012 #include <linux/of.h>
0013 
0014 #include <asm/smp.h>
0015 #include <asm/irq.h>
0016 #include <asm/errno.h>
0017 #include <asm/xics.h>
0018 #include <asm/io.h>
0019 #include <asm/opal.h>
0020 #include <asm/kvm_ppc.h>
0021 
/* Per-CPU teardown: discard any IPI still latched in this CPU's MFRR. */
static void icp_opal_teardown_cpu(void)
{
	/* Writing 0xff (lowest priority) clears a pending IPI */
	opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
}
0029 
0030 static void icp_opal_flush_ipi(void)
0031 {
0032     /*
0033      * We take the ipi irq but and never return so we need to EOI the IPI,
0034      * but want to leave our priority 0.
0035      *
0036      * Should we check all the other interrupts too?
0037      * Should we be flagging idle loop instead?
0038      * Or creating some task to be scheduled?
0039      */
0040     if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
0041         force_external_irq_replay();
0042 }
0043 
0044 static unsigned int icp_opal_get_xirr(void)
0045 {
0046     unsigned int kvm_xirr;
0047     __be32 hw_xirr;
0048     int64_t rc;
0049 
0050     /* Handle an interrupt latched by KVM first */
0051     kvm_xirr = kvmppc_get_xics_latch();
0052     if (kvm_xirr)
0053         return kvm_xirr;
0054 
0055     /* Then ask OPAL */
0056     rc = opal_int_get_xirr(&hw_xirr, false);
0057     if (rc < 0)
0058         return 0;
0059     return be32_to_cpu(hw_xirr);
0060 }
0061 
0062 static unsigned int icp_opal_get_irq(void)
0063 {
0064     unsigned int xirr;
0065     unsigned int vec;
0066     unsigned int irq;
0067 
0068     xirr = icp_opal_get_xirr();
0069     vec = xirr & 0x00ffffff;
0070     if (vec == XICS_IRQ_SPURIOUS)
0071         return 0;
0072 
0073     irq = irq_find_mapping(xics_host, vec);
0074     if (likely(irq)) {
0075         xics_push_cppr(vec);
0076         return irq;
0077     }
0078 
0079     /* We don't have a linux mapping, so have rtas mask it. */
0080     xics_mask_unknown_vec(vec);
0081 
0082     /* We might learn about it later, so EOI it */
0083     if (opal_int_eoi(xirr) > 0)
0084         force_external_irq_replay();
0085 
0086     return 0;
0087 }
0088 
0089 static void icp_opal_set_cpu_priority(unsigned char cppr)
0090 {
0091     /*
0092      * Here be dragons. The caller has asked to allow only IPI's and not
0093      * external interrupts. But OPAL XIVE doesn't support that. So instead
0094      * of allowing no interrupts allow all. That's still not right, but
0095      * currently the only caller who does this is xics_migrate_irqs_away()
0096      * and it works in that case.
0097      */
0098     if (cppr >= DEFAULT_PRIORITY)
0099         cppr = LOWEST_PRIORITY;
0100 
0101     xics_set_base_cppr(cppr);
0102     opal_int_set_cppr(cppr);
0103     iosync();
0104 }
0105 
static void icp_opal_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	iosync();

	/*
	 * EOI tells us whether there are more interrupts to fetch.
	 *
	 * Some HW implementations might not be able to send us another
	 * external interrupt in that case, so force a replay.
	 */
	if (opal_int_eoi((xics_pop_cppr() << 24) | hw_irq) > 0)
		force_external_irq_replay();
}
0123 
0124 #ifdef CONFIG_SMP
0125 
0126 static void icp_opal_cause_ipi(int cpu)
0127 {
0128     int hw_cpu = get_hard_smp_processor_id(cpu);
0129 
0130     kvmppc_set_host_ipi(cpu);
0131     opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
0132 }
0133 
0134 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
0135 {
0136     int cpu = smp_processor_id();
0137 
0138     kvmppc_clear_host_ipi(cpu);
0139     opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
0140 
0141     return smp_ipi_demux();
0142 }
0143 
0144 /*
0145  * Called when an interrupt is received on an off-line CPU to
0146  * clear the interrupt, so that the CPU can go back to nap mode.
0147  */
0148 void icp_opal_flush_interrupt(void)
0149 {
0150     unsigned int xirr;
0151     unsigned int vec;
0152 
0153     do {
0154         xirr = icp_opal_get_xirr();
0155         vec = xirr & 0x00ffffff;
0156         if (vec == XICS_IRQ_SPURIOUS)
0157             break;
0158         if (vec == XICS_IPI) {
0159             /* Clear pending IPI */
0160             int cpu = smp_processor_id();
0161             kvmppc_clear_host_ipi(cpu);
0162             opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
0163         } else {
0164             pr_err("XICS: hw interrupt 0x%x to offline cpu, "
0165                    "disabling\n", vec);
0166             xics_mask_unknown_vec(vec);
0167         }
0168 
0169         /* EOI the interrupt */
0170     } while (opal_int_eoi(xirr) > 0);
0171 }
0172 
0173 #endif /* CONFIG_SMP */
0174 
/* ICP backend implemented via OPAL calls; installed by icp_opal_init() */
static const struct icp_ops icp_opal_ops = {
	.get_irq	= icp_opal_get_irq,
	.eoi		= icp_opal_eoi,
	.set_priority	= icp_opal_set_cpu_priority,
	.teardown_cpu	= icp_opal_teardown_cpu,
	.flush_ipi	= icp_opal_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action	= icp_opal_ipi_action,
	.cause_ipi	= icp_opal_cause_ipi,
#endif
};
0186 
0187 int __init icp_opal_init(void)
0188 {
0189     struct device_node *np;
0190 
0191     np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
0192     if (!np)
0193         return -ENODEV;
0194 
0195     icp_ops = &icp_opal_ops;
0196 
0197     printk("XICS: Using OPAL ICP fallbacks\n");
0198 
0199     of_node_put(np);
0200     return 0;
0201 }
0202