#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/rtas.h>
#include <asm/xics.h>
#include <asm/firmware.h>

/* Globals common to all ICP/ICS implementations */
const struct icp_ops *icp_ops;

unsigned int xics_default_server = 0xff;
unsigned int xics_default_distrib_server = 0;
unsigned int xics_interrupt_server_size = 8;

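/* Per-cpu stack of saved CPPR (Current Processor Priority Register) values */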
DEFINE_PER_CPU(struct xics_cppr, xics_cppr);

struct irq_domain *xics_host;

static struct ics *xics_ics;

void xics_update_irq_servers(void)
{
	int i, j;
	struct device_node *np;
	u32 ilen;
	const __be32 *ireg;
	u32 hcpuid;

	/* Find the hardware server numbers for the boot cpu. */
	np = of_get_cpu_node(boot_cpuid, NULL);
	BUG_ON(!np);

	hcpuid = get_hard_smp_processor_id(boot_cpuid);
	xics_default_server = xics_default_distrib_server = hcpuid;

	pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server);

	ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
	if (!ireg) {
		of_node_put(np);
		return;
	}

	i = ilen / sizeof(int);

	/*
	 * The "ibm,ppc-interrupt-gserver#s" property is a list of
	 * (server, gserver) pairs. Find the pair whose first cell matches
	 * the boot cpu and use its gserver as the default distribution
	 * server.
	 */
	for (j = 0; j < i; j += 2) {
		if (be32_to_cpu(ireg[j]) == hcpuid) {
			xics_default_distrib_server = be32_to_cpu(ireg[j+1]);
			break;
		}
	}
	pr_devel("xics: xics_default_distrib_server = 0x%x\n",
		 xics_default_distrib_server);
	of_node_put(np);
}

/*
 * GIQ handling, currently only supported on RTAS setups; will have
 * to be sorted out properly for bare metal.
 */
void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
{
#ifdef CONFIG_PPC_RTAS
	int index;
	int status;

	if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
		return;

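	/*
	 * The indicator index is the gserver number subtracted from the
	 * maximum server value (2^server_size - 1).
	 */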
	index = (1UL << xics_interrupt_server_size) - 1 - gserver;

	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);

	WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
	     GLOBAL_INTERRUPT_QUEUE, index, join, status);
#endif /* CONFIG_PPC_RTAS */
}

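/*
 * Bring this CPU online from an interrupt standpoint: open the CPPR
 * threshold to the lowest priority (accept all interrupts) and join
 * the global interrupt queue.
 */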
void xics_setup_cpu(void)
{
	icp_ops->set_priority(LOWEST_PRIORITY);

	xics_set_cpu_giq(xics_default_distrib_server, 1);
}

void xics_mask_unknown_vec(unsigned int vec)
{
	pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec);

	if (WARN_ON(!xics_ics))
		return;
	xics_ics->mask_unknown(xics_ics, vec);
}

#ifdef CONFIG_SMP

static void __init xics_request_ipi(void)
{
	unsigned int ipi;

	ipi = irq_create_mapping(xics_host, XICS_IPI);
	BUG_ON(!ipi);

	/*
	 * IPIs are marked IRQF_PERCPU. The handler was set in xics_host_map().
	 */
	BUG_ON(request_irq(ipi, icp_ops->ipi_action,
			   IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}

void __init xics_smp_probe(void)
{
	/* Register the IPI */
	xics_request_ipi();

	/* Setup the cause_ipi callback based on which ICP is used */
	smp_ops->cause_ipi = icp_ops->cause_ipi;
}

#endif /* CONFIG_SMP */

noinstr void xics_teardown_cpu(void)
{
	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);

	/*
	 * We have to reset the CPPR index to 0 because we're
	 * not going to return from the IPI.
	 */
	os_cppr->index = 0;
	icp_ops->set_priority(0);
	icp_ops->teardown_cpu();
}

noinstr void xics_kexec_teardown_cpu(int secondary)
{
	xics_teardown_cpu();

	icp_ops->flush_ipi();

	/*
	 * Some machines need to have at least one cpu in the GIQ,
	 * so leave the master cpu in the group.
	 */
	if (secondary)
		xics_set_cpu_giq(xics_default_distrib_server, 0);
}

#ifdef CONFIG_HOTPLUG_CPU

/* Called with interrupts disabled. */
void xics_migrate_irqs_away(void)
{
	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
	unsigned int irq, virq;
	struct irq_desc *desc;

	pr_debug("%s: CPU %u\n", __func__, cpu);

	/* If we used to be the default server, move to the new "boot_cpuid" */
	if (hw_cpu == xics_default_server)
		xics_update_irq_servers();

	/* Reject any interrupt that was queued to us */
	icp_ops->set_priority(0);

	/* Remove ourselves from the global interrupt queue */
	xics_set_cpu_giq(xics_default_distrib_server, 0);

	for_each_irq_desc(virq, desc) {
		struct irq_chip *chip;
		long server;
		unsigned long flags;
		struct irq_data *irqd;

		/* We can't set affinity on ISA interrupts */
		if (virq < NR_IRQS_LEGACY)
			continue;
		/* We only need to migrate enabled IRQs */
		if (!desc->action)
			continue;
		/* We need a mapping in the XICS IRQ domain */
		irqd = irq_domain_get_irq_data(xics_host, virq);
		if (!irqd)
			continue;
		irq = irqd_to_hwirq(irqd);
		/* We still need to be able to get IPIs */
		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
			continue;
		chip = irq_desc_get_chip(desc);
		if (!chip || !chip->irq_set_affinity)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		/* Locate the interrupt server */
		server = xics_ics->get_server(xics_ics, irq);
		if (server < 0) {
			pr_err("%s: Can't find server for irq %d/%x\n",
			       __func__, virq, irq);
			goto unlock;
		}

		/*
		 * We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu case.
		 */
		if (server != hw_cpu)
			goto unlock;

		/* This is expected during cpu offline. */
		if (cpu_online(cpu))
			pr_warn("IRQ %u affinity broken off cpu %u\n",
				virq, cpu);

		/* Reset affinity to all cpus */
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		irq_set_affinity(virq, cpu_all_mask);
		continue;
unlock:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	/* Allow "sufficient" time to drop any inflight IRQs */
	mdelay(5);

	/*
	 * Allow IPIs again. This is done at the very end, after migrating all
	 * interrupts; the expectation is that we'll only get woken up by an
	 * IPI interrupt beyond this point, but leave externals masked just
	 * to be safe. If we're using icp-opal this may actually allow all
	 * interrupts anyway, but that should be OK.
	 */
	icp_ops->set_priority(DEFAULT_PRIORITY);
}

#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_SMP
/*
 * For the moment we only implement delivery to all cpus or one cpu.
 *
 * If the requested affinity is cpu_all_mask, we set global affinity.
 * If not, we set it to the first cpu in the mask, even if multiple cpus
 * are set. This is so things like irqbalance (which set core and package
 * wide affinities) do the right thing.
 *
 * We need to fix this to implement support for the links.
 */
int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
			unsigned int strict_check)
{
	if (!distribute_irqs)
		return xics_default_server;

	if (!cpumask_subset(cpu_possible_mask, cpumask)) {
		int server = cpumask_first_and(cpu_online_mask, cpumask);

		if (server < nr_cpu_ids)
			return get_hard_smp_processor_id(server);

		if (strict_check)
			return -1;
	}

	/*
	 * Workaround an issue with some versions of JS20 firmware that
	 * deliver interrupts to cpus which haven't been started. This
	 * happens when using the maxcpus= boot option.
	 */
	if (cpumask_equal(cpu_online_mask, cpu_present_mask))
		return xics_default_distrib_server;

	return xics_default_server;
}
#endif /* CONFIG_SMP */

static int xics_host_match(struct irq_domain *h, struct device_node *node,
			   enum irq_domain_bus_token bus_token)
{
	if (WARN_ON(!xics_ics))
		return 0;
	return xics_ics->host_match(xics_ics, node) ? 1 : 0;
}

/* Dummies: IPIs have no per-source mask/unmask control */
static void xics_ipi_unmask(struct irq_data *d) { }
static void xics_ipi_mask(struct irq_data *d) { }

static struct irq_chip xics_ipi_chip = {
	.name = "XICS",
	.irq_eoi = NULL, /* Patched at init time */
	.irq_mask = xics_ipi_mask,
	.irq_unmask = xics_ipi_unmask,
};

static int xics_host_map(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hwirq);

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. The device-tree parsing will turn the LSIs
	 * back to level.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

	/* Don't call into the ICS for IPIs */
	if (hwirq == XICS_IPI) {
		irq_set_chip_and_handler(virq, &xics_ipi_chip,
					 handle_percpu_irq);
		return 0;
	}

	if (WARN_ON(!xics_ics))
		return -EINVAL;

	if (xics_ics->check(xics_ics, hwirq))
		return -EINVAL;

	/* Let the ICS be the chip data for the XICS domain */
	irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
			    xics_ics, handle_fasteoi_irq, NULL, NULL);

	return 0;
}

static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
			   const u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];

	/*
	 * If intsize is at least 2, we look for the type in the second cell,
	 * assuming its LSB indicates a level interrupt.
	 */
	if (intsize > 1) {
		if (intspec[1] & 1)
			*out_flags = IRQ_TYPE_LEVEL_LOW;
		else
			*out_flags = IRQ_TYPE_EDGE_RISING;
	} else
		*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}

int xics_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	/*
	 * We only support these two types. Setting the type only updates the
	 * corresponding descriptor bits, but those in turn affect the resend
	 * logic when re-enabling an edge interrupt.
	 *
	 * Default to edge, as explained in xics_host_map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	return IRQ_SET_MASK_OK_NOCOPY;
}

int xics_retrigger(struct irq_data *data)
{
	/*
	 * We need to push a dummy CPPR when retriggering, since the subsequent
	 * EOI will try to pop it. Passing 0 works, as the function hard-codes
	 * the priority value anyway.
	 */
	xics_push_cppr(0);

	/* Tell the core to do a soft retrigger */
	return 0;
}

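/* Support for the hierarchical IRQ domain API */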
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
static int xics_host_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
				      unsigned long *hwirq, unsigned int *type)
{
	return xics_host_xlate(d, to_of_node(fwspec->fwnode), fwspec->param,
			       fwspec->param_count, hwirq, type);
}

static int xics_host_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int i, rc;

	rc = xics_host_domain_translate(domain, fwspec, &hwirq, &type);
	if (rc)
		return rc;

	pr_debug("%s %d/%lx #%d\n", __func__, virq, hwirq, nr_irqs);

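	/* Set up each IRQ with the ICS chip and the fasteoi flow handler */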
	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i, xics_ics->chip,
				    xics_ics, handle_fasteoi_irq, NULL, NULL);

	return 0;
}

static void xics_host_domain_free(struct irq_domain *domain,
				  unsigned int virq, unsigned int nr_irqs)
{
	pr_debug("%s %d #%d\n", __func__, virq, nr_irqs);
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */

static const struct irq_domain_ops xics_host_ops = {
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	.alloc = xics_host_domain_alloc,
	.free = xics_host_domain_free,
	.translate = xics_host_domain_translate,
#endif
	.match = xics_host_match,
	.map = xics_host_map,
	.xlate = xics_host_xlate,
};

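/*
 * The XICS hwirq number space is large and sparse, so use a radix-tree
 * based domain rather than a linear one, and make it the default host
 * for interrupt mappings.
 */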
static int __init xics_allocate_domain(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("XICS");
	if (!fn)
		return -ENOMEM;

	xics_host = irq_domain_create_tree(fn, &xics_host_ops, NULL);
	if (!xics_host) {
		irq_domain_free_fwnode(fn);
		return -ENOMEM;
	}

	irq_set_default_host(xics_host);
	return 0;
}

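/* Register the platform's interrupt source controller; XICS supports a single ICS */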
void __init xics_register_ics(struct ics *ics)
{
	if (WARN_ONCE(xics_ics, "XICS: Source Controller is already defined !"))
		return;
	xics_ics = ics;
}

static void __init xics_get_server_size(void)
{
	struct device_node *np;
	const __be32 *isize;

	/*
	 * We fetch the interrupt server size from the first ICS node
	 * we find, if any.
	 */
	np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics");
	if (!np)
		return;

	isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
	if (isize)
		xics_interrupt_server_size = be32_to_cpu(*isize);

	of_node_put(np);
}

void __init xics_init(void)
{
	int rc = -1;

	/* First, locate the ICP (presentation controller) */
	if (firmware_has_feature(FW_FEATURE_LPAR))
		rc = icp_hv_init();
	if (rc < 0) {
		rc = icp_native_init();
		if (rc == -ENODEV)
			rc = icp_opal_init();
	}
	if (rc < 0) {
		pr_warn("XICS: Cannot find a Presentation Controller !\n");
		return;
	}

	/* Copy the get_irq callback over to ppc_md */
	ppc_md.get_irq = icp_ops->get_irq;

	/* Patch up the IPI chip EOI */
	xics_ipi_chip.irq_eoi = icp_ops->eoi;

	/* Now locate the ICS (source controller) */
	rc = ics_rtas_init();
	if (rc < 0)
		rc = ics_opal_init();
	if (rc < 0)
		rc = ics_native_init();
	if (rc < 0)
		pr_warn("XICS: Cannot find a Source Controller !\n");

	/* Initialize the common bits */
	xics_get_server_size();
	xics_update_irq_servers();
	rc = xics_allocate_domain();
	if (rc < 0)
		pr_err("XICS: Failed to create IRQ domain\n");
	xics_setup_cpu();
}