0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019 #include <linux/interrupt.h>
0020 #include <linux/irq.h>
0021 #include <linux/irqdomain.h>
0022 #include <linux/export.h>
0023 #include <linux/percpu.h>
0024 #include <linux/types.h>
0025 #include <linux/ioport.h>
0026 #include <linux/kernel_stat.h>
0027 #include <linux/pgtable.h>
0028 #include <linux/of_address.h>
0029
0030 #include <asm/io.h>
0031 #include <asm/ptrace.h>
0032 #include <asm/machdep.h>
0033 #include <asm/cell-regs.h>
0034
0035 #include "interrupt.h"
0036
/* Per-hardware-thread state for one Cell Internal Interrupt Controller. */
struct iic {
	struct cbe_iic_thread_regs __iomem *regs;	/* mapped thread register block */
	u8 target_id;		/* destination id used to route IRQs at this thread */
	u8 eoi_stack[16];	/* priorities saved on dispatch, restored on EOI */
	int eoi_ptr;		/* top-of-stack index into eoi_stack */
	struct device_node *node;	/* OF node this IIC was probed from */
};
0044
/* One IIC instance per hardware thread. */
static DEFINE_PER_CPU(struct iic, cpu_iic);
#define IIC_NODE_COUNT 2
/* Single linear irq domain covering all IIC sources. */
static struct irq_domain *iic_host;
0048
0049
0050 static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
0051 {
0052 unsigned char unit = bits.source & 0xf;
0053 unsigned char node = bits.source >> 4;
0054 unsigned char class = bits.class & 3;
0055
0056
0057 if (bits.flags & CBE_IIC_IRQ_IPI)
0058 return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
0059 else
0060 return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
0061 }
0062
/*
 * Intentionally empty: no per-source mask is programmed here; the
 * callback only exists to satisfy the irq_chip interface.
 */
static void iic_mask(struct irq_data *d)
{
}
0066
/* Intentionally empty counterpart to iic_mask(). */
static void iic_unmask(struct irq_data *d)
{
}
0070
0071 static void iic_eoi(struct irq_data *d)
0072 {
0073 struct iic *iic = this_cpu_ptr(&cpu_iic);
0074 out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
0075 BUG_ON(iic->eoi_ptr < 0);
0076 }
0077
/* irq_chip for normal (non I/O-exception) IIC sources. */
static struct irq_chip iic_chip = {
	.name = "CELL-IIC",
	.irq_mask = iic_mask,
	.irq_unmask = iic_unmask,
	.irq_eoi = iic_eoi,
};
0084
0085
/*
 * Intentionally empty: I/O exception sources are acknowledged by the
 * cascade handler's writes to iic_is, not per-virq here.
 */
static void iic_ioexc_eoi(struct irq_data *d)
{
}
0089
/*
 * Chained handler for the per-node I/O exception cascade.  Reads the
 * node IIC status register in a loop and dispatches every set bit as a
 * separate IOEXC hw irq in the iic_host domain.
 */
static void iic_ioexc_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cbe_iic_regs __iomem *node_iic =
		(void __iomem *)irq_desc_get_handler_data(desc);
	unsigned int irq = irq_desc_get_irq(desc);
	/* Same node bits as the cascade irq, with the IOEXC type. */
	unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
	unsigned long bits, ack;
	int cascade;

	for (;;) {
		bits = in_be64(&node_iic->iic_is);
		if (bits == 0)
			break;
		/* Pre-ack edge interrupts before handling them. */
		ack = bits & IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
		/* Dispatch each pending bit, MSB (bit 0, PPC numbering) first. */
		for (cascade = 63; cascade >= 0; cascade--)
			if (bits & (0x8000000000000000UL >> cascade))
				generic_handle_domain_irq(iic_host,
							  base | cascade);
		/* Post-ack the remaining (level) interrupts after handling. */
		ack = bits & ~IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
	}
	chip->irq_eoi(&desc->irq_data);
}
0120
0121
/* irq_chip for I/O exception sources cascaded through the node IIC. */
static struct irq_chip iic_ioexc_chip = {
	.name = "CELL-IOEX",
	.irq_mask = iic_mask,
	.irq_unmask = iic_unmask,
	.irq_eoi = iic_ioexc_eoi,
};
0128
0129
0130 static unsigned int iic_get_irq(void)
0131 {
0132 struct cbe_iic_pending_bits pending;
0133 struct iic *iic;
0134 unsigned int virq;
0135
0136 iic = this_cpu_ptr(&cpu_iic);
0137 *(unsigned long *) &pending =
0138 in_be64((u64 __iomem *) &iic->regs->pending_destr);
0139 if (!(pending.flags & CBE_IIC_IRQ_VALID))
0140 return 0;
0141 virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
0142 if (!virq)
0143 return 0;
0144 iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
0145 BUG_ON(iic->eoi_ptr > 15);
0146 return virq;
0147 }
0148
/* Per-CPU IIC init: write 0xff to this thread's priority register. */
void iic_setup_cpu(void)
{
	out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
}
0153
0154 u8 iic_get_target_id(int cpu)
0155 {
0156 return per_cpu(cpu_iic, cpu).target_id;
0157 }
0158
0159 EXPORT_SYMBOL_GPL(iic_get_target_id);
0160
0161 #ifdef CONFIG_SMP
0162
0163
0164 static inline int iic_msg_to_irq(int msg)
0165 {
0166 return IIC_IRQ_TYPE_IPI + 0xf - msg;
0167 }
0168
/*
 * Send IPI message @msg to @cpu by writing its encoding (high nibble,
 * counting down from 0xf — matching iic_msg_to_irq) into the target
 * thread's generate register.
 */
void iic_message_pass(int cpu, int msg)
{
	out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
}
0173
/* Map one IPI message's hw irq and register it with the SMP core. */
static void iic_request_ipi(int msg)
{
	int virq;

	virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg));
	if (!virq) {
		printk(KERN_ERR
		       "iic: failed to map IPI %s\n", smp_ipi_name[msg]);
		return;
	}

	/*
	 * If smp_request_message_ipi fails to register the handler, the
	 * mapping we just created is of no use: dispose of it again.  On
	 * success the mapping is kept for the life of the system.
	 */
	if (smp_request_message_ipi(virq, msg))
		irq_dispose_mapping(virq);
}
0192
/* Register all IPI messages used by the powerpc SMP core. */
void iic_request_IPIs(void)
{
	iic_request_ipi(PPC_MSG_CALL_FUNCTION);
	iic_request_ipi(PPC_MSG_RESCHEDULE);
	iic_request_ipi(PPC_MSG_TICK_BROADCAST);
	iic_request_ipi(PPC_MSG_NMI_IPI);
}
0200
0201 #endif
0202
0203
0204 static int iic_host_match(struct irq_domain *h, struct device_node *node,
0205 enum irq_domain_bus_token bus_token)
0206 {
0207 return of_device_is_compatible(node,
0208 "IBM,CBEA-Internal-Interrupt-Controller");
0209 }
0210
0211 static int iic_host_map(struct irq_domain *h, unsigned int virq,
0212 irq_hw_number_t hw)
0213 {
0214 switch (hw & IIC_IRQ_TYPE_MASK) {
0215 case IIC_IRQ_TYPE_IPI:
0216 irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
0217 break;
0218 case IIC_IRQ_TYPE_IOEXC:
0219 irq_set_chip_and_handler(virq, &iic_ioexc_chip,
0220 handle_edge_eoi_irq);
0221 break;
0222 default:
0223 irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
0224 }
0225 return 0;
0226 }
0227
/*
 * Domain xlate: decode a one-cell device-tree interrupt specifier into a
 * hw irq number.  Cell layout (one u32): node[31:24] ext[23:16]
 * class[15:8] unit[7:0].
 */
static int iic_host_xlate(struct irq_domain *h, struct device_node *ct,
			  const u32 *intspec, unsigned int intsize,
			  irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	unsigned int node, ext, unit, class;
	const u32 *val;

	if (!of_device_is_compatible(ct,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
		return -ENODEV;
	if (intsize != 1)
		return -ENODEV;
	val = of_get_property(ct, "#interrupt-cells", NULL);
	if (val == NULL || *val != 1)
		return -ENODEV;

	node = intspec[0] >> 24;
	ext = (intspec[0] >> 16) & 0xff;
	class = (intspec[0] >> 8) & 0xff;
	unit = intspec[0] & 0xff;

	/* Only two BE nodes (0 and 1) exist; see IIC_NODE_COUNT. */
	if (node > 1)
		return -EINVAL;

	/* Unit IIC with class 1 denotes an I/O exception source "ext". */
	*out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
	if (unit == IIC_UNIT_IIC && class == 1)
		*out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
	else
		*out_hwirq |= IIC_IRQ_TYPE_NORMAL |
			(class << IIC_IRQ_CLASS_SHIFT) | unit;

	/* All IIC interrupts are treated as rising-edge. */
	*out_flags = IRQ_TYPE_EDGE_RISING;

	return 0;
}
0267
/* irq_domain callbacks for the IIC linear domain. */
static const struct irq_domain_ops iic_host_ops = {
	.match = iic_host_match,
	.map = iic_host_map,
	.xlate = iic_host_xlate,
};
0273
/*
 * Initialize the per-thread IIC state for hardware thread @hw_cpu, whose
 * thread register block lives at physical @addr, probed from OF @node.
 * NOTE(review): indexes cpu_iic by the HW thread number, assuming it
 * matches the logical CPU number — confirm for this platform.
 */
static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
				struct device_node *node)
{
	struct iic *iic = &per_cpu(cpu_iic, hw_cpu);

	iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
	BUG_ON(iic->regs == NULL);

	/* target id: node in bit 4, thread 1 -> 0xf, thread 0 -> 0xe. */
	iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
	iic->eoi_stack[0] = 0xff;	/* bottom-of-stack sentinel priority */
	iic->node = of_node_get(node);
	out_be64(&iic->regs->prio, 0);

	printk(KERN_INFO "IIC for CPU %d target id 0x%x : %pOF\n",
	       hw_cpu, iic->target_id, node);
}
0293
/*
 * Probe all CBEA interrupt-controller OF nodes, set up the two thread
 * IICs of each node and wire up the node's I/O exception cascade.
 * Returns 0 if at least one controller was found, -ENODEV otherwise.
 */
static int __init setup_iic(void)
{
	struct device_node *dn;
	struct resource r0, r1;
	unsigned int node, cascade, found = 0;
	struct cbe_iic_regs __iomem *node_iic;
	const u32 *np;

	for_each_node_by_name(dn, "interrupt-controller") {
		if (!of_device_is_compatible(dn,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
			continue;
		/* np[0]/np[1] are the HW threads served by this node's IIC. */
		np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
		if (np == NULL) {
			printk(KERN_WARNING "IIC: CPU association not found\n");
			of_node_put(dn);	/* drop loop iterator's ref */
			return -ENODEV;
		}
		if (of_address_to_resource(dn, 0, &r0) ||
		    of_address_to_resource(dn, 1, &r1)) {
			printk(KERN_WARNING "IIC: Can't resolve addresses\n");
			of_node_put(dn);
			return -ENODEV;
		}
		found++;
		init_one_iic(np[0], r0.start, dn);
		init_one_iic(np[1], r1.start, dn);

		/*
		 * Set up the I/O exception cascade for this node: build the
		 * hw irq number (node | class 1 | unit IIC), map it, and
		 * install the chained handler with the node regs as data.
		 */
		node = np[0] >> 1;
		node_iic = cbe_get_cpu_iic_regs(np[0]);
		cascade = node << IIC_IRQ_NODE_SHIFT;
		cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
		cascade |= IIC_UNIT_IIC;
		cascade = irq_create_mapping(iic_host, cascade);
		if (!cascade)
			continue;

		irq_set_handler_data(cascade, (void __force *)node_iic);
		irq_set_chained_handler(cascade, iic_ioexc_cascade);
		/* Route the cascade to thread 0 of this node. */
		out_be64(&node_iic->iic_ir,
			 (1 << 12) |
			 (node << 4) |
			 IIC_UNIT_THREAD_0 );

		/* Clear out any stale status bits. */
		out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
	}

	if (found)
		return 0;
	else
		return -ENODEV;
}
0356
/*
 * Platform IRQ init: create the IIC irq domain, probe the controllers,
 * install the get_irq hook and enable the boot CPU's IIC.
 */
void __init iic_init_IRQ(void)
{
	/* Linear domain covering every possible IIC source. */
	iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops,
					 NULL);
	BUG_ON(iic_host == NULL);
	irq_set_default_host(iic_host);

	/* Discover and set up the controllers; fatal if none found. */
	if (setup_iic() < 0)
		panic("IIC: Failed to initialize !\n");

	ppc_md.get_irq = iic_get_irq;

	/* Enable the IIC on the boot CPU; secondaries do this themselves. */
	iic_setup_cpu();
}
0375
0376 void iic_set_interrupt_routing(int cpu, int thread, int priority)
0377 {
0378 struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
0379 u64 iic_ir = 0;
0380 int node = cpu >> 1;
0381
0382
0383 iic_ir |= CBE_IIC_IR_PRIO(priority) |
0384 CBE_IIC_IR_DEST_NODE(node);
0385 if (thread == 0)
0386 iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
0387 else
0388 iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
0389 out_be64(&iic_regs->iic_ir, iic_ir);
0390 }