/*
 * ICP (Interrupt Controller Presentation) backend for XICS using PAPR
 * hypervisor calls (H_XIRR / H_CPPR / H_EOI / H_IPI).
 */
0005 #include <linux/types.h>
0006 #include <linux/kernel.h>
0007 #include <linux/irq.h>
0008 #include <linux/smp.h>
0009 #include <linux/interrupt.h>
0010 #include <linux/irqdomain.h>
0011 #include <linux/cpu.h>
0012 #include <linux/of.h>
0013
0014 #include <asm/smp.h>
0015 #include <asm/irq.h>
0016 #include <asm/errno.h>
0017 #include <asm/xics.h>
0018 #include <asm/io.h>
0019 #include <asm/hvcall.h>
0020
0021 static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
0022 {
0023 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
0024 long rc;
0025 unsigned int ret = XICS_IRQ_SPURIOUS;
0026
0027 rc = plpar_hcall(H_XIRR, retbuf, cppr);
0028 if (rc == H_SUCCESS) {
0029 ret = (unsigned int)retbuf[0];
0030 } else {
0031 pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
0032 __func__, cppr, rc);
0033 WARN_ON_ONCE(1);
0034 }
0035
0036 return ret;
0037 }
0038
0039 static inline void icp_hv_set_cppr(u8 value)
0040 {
0041 long rc = plpar_hcall_norets(H_CPPR, value);
0042 if (rc != H_SUCCESS) {
0043 pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n",
0044 __func__, value, rc);
0045 WARN_ON_ONCE(1);
0046 }
0047 }
0048
0049 static inline void icp_hv_set_xirr(unsigned int value)
0050 {
0051 long rc = plpar_hcall_norets(H_EOI, value);
0052 if (rc != H_SUCCESS) {
0053 pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
0054 __func__, value, rc);
0055 WARN_ON_ONCE(1);
0056 icp_hv_set_cppr(value >> 24);
0057 }
0058 }
0059
0060 static inline void icp_hv_set_qirr(int n_cpu , u8 value)
0061 {
0062 int hw_cpu = get_hard_smp_processor_id(n_cpu);
0063 long rc;
0064
0065
0066 mb();
0067 rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
0068 if (rc != H_SUCCESS) {
0069 pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
0070 "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
0071 WARN_ON_ONCE(1);
0072 }
0073 }
0074
/* irq_chip ->irq_eoi hook: signal end-of-interrupt for @d's hw vector. */
static void icp_hv_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	/* Order prior device accesses before the EOI hcall */
	iosync();
	/* XIRR word: restored priority (popped off the CPPR stack) in the
	 * top byte, interrupt vector in the low 24 bits */
	icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq);
}
0082
/* CPU teardown: make sure this CPU has no IPI pending in its MFRR. */
static void icp_hv_teardown_cpu(void)
{
	int cpu = smp_processor_id();

	/* Clear any pending IPI by writing 0xff (lowest priority) to the MFRR */
	icp_hv_set_qirr(cpu, 0xff);
}
0090
static void icp_hv_flush_ipi(void)
{
	/*
	 * EOI the IPI vector while keeping the priority byte at 0x00
	 * (most favored).  NOTE(review): presumably used on a CPU that
	 * took the IPI but will never return from it, so the EOI must
	 * happen here without popping the CPPR stack — confirm against
	 * the xics core's teardown path.
	 */
	icp_hv_set_xirr((0x00 << 24) | XICS_IPI);
}
0103
0104 static unsigned int icp_hv_get_irq(void)
0105 {
0106 unsigned int xirr = icp_hv_get_xirr(xics_cppr_top());
0107 unsigned int vec = xirr & 0x00ffffff;
0108 unsigned int irq;
0109
0110 if (vec == XICS_IRQ_SPURIOUS)
0111 return 0;
0112
0113 irq = irq_find_mapping(xics_host, vec);
0114 if (likely(irq)) {
0115 xics_push_cppr(vec);
0116 return irq;
0117 }
0118
0119
0120 xics_mask_unknown_vec(vec);
0121
0122
0123 icp_hv_set_xirr(xirr);
0124
0125 return 0;
0126 }
0127
/* icp_ops ->set_priority hook: set this CPU's base interrupt priority. */
static void icp_hv_set_cpu_priority(unsigned char cppr)
{
	/* Record the new base priority before programming the hardware */
	xics_set_base_cppr(cppr);
	icp_hv_set_cppr(cppr);
	iosync();
}
0134
0135 #ifdef CONFIG_SMP
0136
/* icp_ops ->cause_ipi hook: raise an IPI on @cpu via its MFRR. */
static void icp_hv_cause_ipi(int cpu)
{
	icp_hv_set_qirr(cpu, IPI_PRIORITY);
}
0141
/* IPI interrupt handler: acknowledge the IPI, then demux its message(s). */
static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	/* Clear our MFRR (0xff = lowest priority) before handling the IPI */
	icp_hv_set_qirr(cpu, 0xff);

	return smp_ipi_demux();
}
0150
0151 #endif
0152
/* hcall-based implementation of the ICP operations used by the xics core */
static const struct icp_ops icp_hv_ops = {
	.get_irq = icp_hv_get_irq,
	.eoi = icp_hv_eoi,
	.set_priority = icp_hv_set_cpu_priority,
	.teardown_cpu = icp_hv_teardown_cpu,
	.flush_ipi = icp_hv_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action = icp_hv_ipi_action,
	.cause_ipi = icp_hv_cause_ipi,
#endif
};
0164
0165 int __init icp_hv_init(void)
0166 {
0167 struct device_node *np;
0168
0169 np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp");
0170 if (!np)
0171 np = of_find_node_by_type(NULL,
0172 "PowerPC-External-Interrupt-Presentation");
0173 if (!np)
0174 return -ENODEV;
0175
0176 icp_ops = &icp_hv_ops;
0177
0178 of_node_put(np);
0179 return 0;
0180 }
0181