0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/cpu.h>
0010 #include <linux/interrupt.h>
0011 #include <linux/module.h>
0012 #include <linux/msi.h>
0013 #include <linux/of_irq.h>
0014 #include <linux/irqchip/chained_irq.h>
0015 #include <linux/pci.h>
0016 #include <linux/platform_device.h>
0017 #include <linux/of_pci.h>
0018
0019 #define MSI_IR0 0x000000
0020 #define MSI_INT0 0x800000
0021 #define IDX_PER_GROUP 8
0022 #define IRQS_PER_IDX 16
0023 #define NR_HW_IRQS 16
0024 #define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
0025
/*
 * One MSI termination group: ties a single top-level GIC interrupt line to
 * its hardware group index.  One of these exists per HW IRQ (NR_HW_IRQS),
 * and each is handed to the chained handler as irq_desc handler data.
 */
struct xgene_msi_group {
	struct xgene_msi	*msi;		/* back-pointer to the owning controller */
	int			gic_irq;	/* Linux virq of this group's GIC line (0 = unused) */
	u32			msi_grp;	/* hardware group index, 0..NR_HW_IRQS-1 */
};
0031
/*
 * Driver state for the X-Gene v1 MSI controller block.
 *
 * The controller terminates NR_MSI_VEC MSI vectors; allocation is tracked in
 * @bitmap (protected by @bitmap_lock).  @inner_domain hands out hwirqs, and
 * @msi_domain is the PCI MSI domain stacked on top of it.
 */
struct xgene_msi {
	struct device_node	*node;		/* DT node, used as fwnode for the MSI domain */
	struct irq_domain	*inner_domain;	/* linear domain over NR_MSI_VEC hwirqs */
	struct irq_domain	*msi_domain;	/* PCI MSI domain parented on inner_domain */
	u64			msi_addr;	/* physical base of the MSI register frame (doorbell target) */
	void __iomem		*msi_regs;	/* ioremapped MSI register frame */
	unsigned long		*bitmap;	/* NR_MSI_VEC-bit vector allocation bitmap */
	struct mutex		bitmap_lock;	/* serializes bitmap alloc/free */
	struct xgene_msi_group	*msi_groups;	/* array of NR_HW_IRQS group descriptors */
	int			num_cpus;	/* num_possible_cpus(); vectors are reserved in runs of this size */
};
0043
/* Single driver-wide controller instance (the driver supports one MSI block) */
static struct xgene_msi xgene_msi_ctrl;
0046
/*
 * Top-level irq_chip presented to PCI drivers.  Masking is done purely at
 * the PCI MSI capability level; the controller itself has no per-vector
 * mask registers visible here.
 */
static struct irq_chip xgene_msi_top_irq_chip = {
	.name		= "X-Gene1 MSI",
	.irq_enable	= pci_msi_unmask_irq,
	.irq_disable	= pci_msi_mask_irq,
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};
0054
/* MSI domain description: default ops/chip callbacks, MSI-X capable */
static struct msi_domain_info xgene_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &xgene_msi_top_irq_chip,
};
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094 static u32 xgene_msi_ir_read(struct xgene_msi *msi,
0095 u32 msi_grp, u32 msir_idx)
0096 {
0097 return readl_relaxed(msi->msi_regs + MSI_IR0 +
0098 (msi_grp << 19) + (msir_idx << 16));
0099 }
0100
0101
0102 static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
0103 {
0104 return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
0105 }
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126 static u32 hwirq_to_reg_set(unsigned long hwirq)
0127 {
0128 return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
0129 }
0130
0131 static u32 hwirq_to_group(unsigned long hwirq)
0132 {
0133 return (hwirq % NR_HW_IRQS);
0134 }
0135
0136 static u32 hwirq_to_msi_data(unsigned long hwirq)
0137 {
0138 return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
0139 }
0140
0141 static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
0142 {
0143 struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
0144 u32 reg_set = hwirq_to_reg_set(data->hwirq);
0145 u32 group = hwirq_to_group(data->hwirq);
0146 u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);
0147
0148 msg->address_hi = upper_32_bits(target_addr);
0149 msg->address_lo = lower_32_bits(target_addr);
0150 msg->data = hwirq_to_msi_data(data->hwirq);
0151 }
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162 static int hwirq_to_cpu(unsigned long hwirq)
0163 {
0164 return (hwirq % xgene_msi_ctrl.num_cpus);
0165 }
0166
/* Strip the CPU offset, yielding the base hwirq of the num_cpus-sized run */
static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
{
	return hwirq - hwirq_to_cpu(hwirq);
}
0171
0172 static int xgene_msi_set_affinity(struct irq_data *irqdata,
0173 const struct cpumask *mask, bool force)
0174 {
0175 int target_cpu = cpumask_first(mask);
0176 int curr_cpu;
0177
0178 curr_cpu = hwirq_to_cpu(irqdata->hwirq);
0179 if (curr_cpu == target_cpu)
0180 return IRQ_SET_MASK_OK_DONE;
0181
0182
0183 irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu;
0184
0185 return IRQ_SET_MASK_OK;
0186 }
0187
/* Bottom-level chip for the inner domain: affinity + message composition only */
static struct irq_chip xgene_msi_bottom_irq_chip = {
	.name			= "MSI",
	.irq_set_affinity	= xgene_msi_set_affinity,
	.irq_compose_msi_msg	= xgene_compose_msi_msg,
};
0193
0194 static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
0195 unsigned int nr_irqs, void *args)
0196 {
0197 struct xgene_msi *msi = domain->host_data;
0198 int msi_irq;
0199
0200 mutex_lock(&msi->bitmap_lock);
0201
0202 msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
0203 msi->num_cpus, 0);
0204 if (msi_irq < NR_MSI_VEC)
0205 bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
0206 else
0207 msi_irq = -ENOSPC;
0208
0209 mutex_unlock(&msi->bitmap_lock);
0210
0211 if (msi_irq < 0)
0212 return msi_irq;
0213
0214 irq_domain_set_info(domain, virq, msi_irq,
0215 &xgene_msi_bottom_irq_chip, domain->host_data,
0216 handle_simple_irq, NULL, NULL);
0217
0218 return 0;
0219 }
0220
0221 static void xgene_irq_domain_free(struct irq_domain *domain,
0222 unsigned int virq, unsigned int nr_irqs)
0223 {
0224 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
0225 struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
0226 u32 hwirq;
0227
0228 mutex_lock(&msi->bitmap_lock);
0229
0230 hwirq = hwirq_to_canonical_hwirq(d->hwirq);
0231 bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);
0232
0233 mutex_unlock(&msi->bitmap_lock);
0234
0235 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
0236 }
0237
/* irq_domain callbacks for the inner (hwirq-allocating) domain */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc  = xgene_irq_domain_alloc,
	.free   = xgene_irq_domain_free,
};
0242
0243 static int xgene_allocate_domains(struct xgene_msi *msi)
0244 {
0245 msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
0246 &msi_domain_ops, msi);
0247 if (!msi->inner_domain)
0248 return -ENOMEM;
0249
0250 msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
0251 &xgene_msi_domain_info,
0252 msi->inner_domain);
0253
0254 if (!msi->msi_domain) {
0255 irq_domain_remove(msi->inner_domain);
0256 return -ENOMEM;
0257 }
0258
0259 return 0;
0260 }
0261
/* Tear down the domain hierarchy, child (MSI) domain before parent (inner) */
static void xgene_free_domains(struct xgene_msi *msi)
{
	if (msi->msi_domain)
		irq_domain_remove(msi->msi_domain);
	if (msi->inner_domain)
		irq_domain_remove(msi->inner_domain);
}
0269
0270 static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
0271 {
0272 xgene_msi->bitmap = bitmap_zalloc(NR_MSI_VEC, GFP_KERNEL);
0273 if (!xgene_msi->bitmap)
0274 return -ENOMEM;
0275
0276 mutex_init(&xgene_msi->bitmap_lock);
0277
0278 xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
0279 sizeof(struct xgene_msi_group),
0280 GFP_KERNEL);
0281 if (!xgene_msi->msi_groups)
0282 return -ENOMEM;
0283
0284 return 0;
0285 }
0286
/*
 * Chained handler for one top-level GIC line (one MSI group).
 *
 * Scans the group's MSIINTn summary register for termination indexes with
 * pending MSIs, then scans each such MSInIRx register bit by bit,
 * reconstructing the hwirq for every pending vector and dispatching it into
 * the inner domain.
 */
static void xgene_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct xgene_msi_group *msi_groups;
	struct xgene_msi *xgene_msi;
	int msir_index, msir_val, hw_irq, ret;
	u32 intr_index, grp_select, msi_grp;

	chained_irq_enter(chip, desc);

	msi_groups = irq_desc_get_handler_data(desc);
	xgene_msi = msi_groups->msi;
	msi_grp = msi_groups->msi_grp;

	/*
	 * Each set bit in MSIINTn identifies a termination register index
	 * within this group that has at least one MSI pending.
	 */
	grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
	while (grp_select) {
		msir_index = ffs(grp_select) - 1;

		/*
		 * Read the termination register; each set bit is one pending
		 * MSI vector.  NOTE(review): the spurious-IRQ drain in probe
		 * suggests this read also clears the status — confirm against
		 * the X-Gene register spec.
		 */
		msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
		while (msir_val) {
			intr_index = ffs(msir_val) - 1;

			/*
			 * Rebuild the hwirq from (index, bit, group) — the
			 * inverse of the hwirq_to_* decomposition helpers.
			 */
			hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
				  NR_HW_IRQS) + msi_grp;

			/*
			 * set_affinity may have offset the hwirq by a CPU
			 * number; the domain only knows the canonical base.
			 */
			hw_irq = hwirq_to_canonical_hwirq(hw_irq);
			ret = generic_handle_domain_irq(xgene_msi->inner_domain, hw_irq);
			WARN_ON_ONCE(ret);
			msir_val &= ~(1 << intr_index);
		}
		grp_select &= ~(1 << msir_index);

		if (!grp_select) {
			/*
			 * Re-read the summary register before exiting so MSIs
			 * that arrived while we were handling this batch are
			 * not left pending until the next edge.
			 */
			grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
		}
	}

	chained_irq_exit(chip, desc);
}
0348
/* Dynamic CPU-hotplug online state returned by cpuhp_setup_state() in probe */
static enum cpuhp_state pci_xgene_online;
0350
/*
 * Driver removal; also used as the unified error-unwind path of probe.
 * Tears down hotplug states (which detach the chained handlers), frees the
 * allocator structures, then removes the irq domains.  Always returns 0.
 */
static int xgene_msi_remove(struct platform_device *pdev)
{
	struct xgene_msi *msi = platform_get_drvdata(pdev);

	if (pci_xgene_online)
		cpuhp_remove_state(pci_xgene_online);
	/*
	 * NOTE(review): this runs even when probe failed before the DEAD
	 * state was installed — verify cpuhp_remove_state() tolerates
	 * removing a state that was never set up on this path.
	 */
	cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);

	kfree(msi->msi_groups);

	bitmap_free(msi->bitmap);
	msi->bitmap = NULL;

	xgene_free_domains(msi);

	return 0;
}
0368
0369 static int xgene_msi_hwirq_alloc(unsigned int cpu)
0370 {
0371 struct xgene_msi *msi = &xgene_msi_ctrl;
0372 struct xgene_msi_group *msi_group;
0373 cpumask_var_t mask;
0374 int i;
0375 int err;
0376
0377 for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
0378 msi_group = &msi->msi_groups[i];
0379 if (!msi_group->gic_irq)
0380 continue;
0381
0382 irq_set_chained_handler_and_data(msi_group->gic_irq,
0383 xgene_msi_isr, msi_group);
0384
0385
0386
0387
0388
0389
0390 if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
0391 cpumask_clear(mask);
0392 cpumask_set_cpu(cpu, mask);
0393 err = irq_set_affinity(msi_group->gic_irq, mask);
0394 if (err)
0395 pr_err("failed to set affinity for GIC IRQ");
0396 free_cpumask_var(mask);
0397 } else {
0398 pr_err("failed to alloc CPU mask for affinity\n");
0399 err = -EINVAL;
0400 }
0401
0402 if (err) {
0403 irq_set_chained_handler_and_data(msi_group->gic_irq,
0404 NULL, NULL);
0405 return err;
0406 }
0407 }
0408
0409 return 0;
0410 }
0411
0412 static int xgene_msi_hwirq_free(unsigned int cpu)
0413 {
0414 struct xgene_msi *msi = &xgene_msi_ctrl;
0415 struct xgene_msi_group *msi_group;
0416 int i;
0417
0418 for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
0419 msi_group = &msi->msi_groups[i];
0420 if (!msi_group->gic_irq)
0421 continue;
0422
0423 irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
0424 NULL);
0425 }
0426 return 0;
0427 }
0428
/* Device-tree match table: X-Gene v1 MSI controller node */
static const struct of_device_id xgene_msi_match_table[] = {
	{.compatible = "apm,xgene1-msi"},
	{},
};
0433
/*
 * Probe: map the MSI register frame, build the allocator and irq domains,
 * collect the NR_HW_IRQS top-level GIC lines, drain any stale MSI status,
 * and register the CPU-hotplug callbacks that attach the chained handlers.
 *
 * All failure paths funnel into xgene_msi_remove(), which tolerates the
 * partially initialized singleton state.
 */
static int xgene_msi_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc, irq_index;
	struct xgene_msi *xgene_msi;
	int virt_msir;
	u32 msi_val, msi_idx;

	xgene_msi = &xgene_msi_ctrl;

	platform_set_drvdata(pdev, xgene_msi);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xgene_msi->msi_regs)) {
		rc = PTR_ERR(xgene_msi->msi_regs);
		goto error;
	}
	/* Physical base doubles as the MSI doorbell target address */
	xgene_msi->msi_addr = res->start;
	xgene_msi->node = pdev->dev.of_node;
	xgene_msi->num_cpus = num_possible_cpus();

	rc = xgene_msi_init_allocator(xgene_msi);
	if (rc) {
		dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
		goto error;
	}

	rc = xgene_allocate_domains(xgene_msi);
	if (rc) {
		dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
		goto error;
	}

	/* One platform IRQ per hardware group, in group order */
	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
		virt_msir = platform_get_irq(pdev, irq_index);
		if (virt_msir < 0) {
			rc = virt_msir;
			goto error;
		}
		xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
		xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
		xgene_msi->msi_groups[irq_index].msi = xgene_msi;
	}

	/*
	 * Drain stale state left over from firmware/boot: read every
	 * termination register (reads appear to clear pending bits —
	 * see xgene_msi_ir_read), then verify the group summary is clean.
	 */
	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
		for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
			xgene_msi_ir_read(xgene_msi, irq_index, msi_idx);

		/* A non-zero summary after draining means stuck status */
		msi_val = xgene_msi_int_read(xgene_msi, irq_index);
		if (msi_val) {
			dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
			rc = -EINVAL;
			goto error;
		}
	}

	/* Online callback installs chained handlers per CPU as CPUs come up */
	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
			       xgene_msi_hwirq_alloc, NULL);
	if (rc < 0)
		goto err_cpuhp;
	pci_xgene_online = rc;
	rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
			       xgene_msi_hwirq_free);
	if (rc)
		goto err_cpuhp;

	dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");

	return 0;

err_cpuhp:
	dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
error:
	xgene_msi_remove(pdev);
	return rc;
}
0517
/* Platform driver glue; matched via the DT compatible above */
static struct platform_driver xgene_msi_driver = {
	.driver = {
		.name = "xgene-msi",
		.of_match_table = xgene_msi_match_table,
	},
	.probe = xgene_msi_probe,
	.remove = xgene_msi_remove,
};
0526
/*
 * Registered at subsys_initcall time so the MSI domain exists before the
 * PCIe host controller probes and its endpoint drivers request MSIs.
 */
static int __init xgene_pcie_msi_init(void)
{
	return platform_driver_register(&xgene_msi_driver);
}
subsys_initcall(xgene_pcie_msi_init);