// SPDX-License-Identifier: GPL-2.0
/*
 * Freescale Layerscape SCFG MSI(-X) controller driver
 *
 * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/dma-iommu.h>

#define MSI_IRQS_PER_MSIR		32
#define MSI_MSIR_OFFSET			4

#define MSI_LS1043V1_1_IRQS_PER_MSIR	8
#define MSI_LS1043V1_1_MSIR_OFFSET	0x10

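/*
 * struct ls_scfg_msi_cfg describes the per-SoC geometry of the SCFG MSI
 * block: ibs_shift is the number of low bits of the MSI data/hwirq that
 * select the target MSIR (shared register select, SRS), while the
 * remaining bits select the interrupt bit (IBS) within that register;
 * msir_irqs is the number of usable bits per MSIR; msir_base is the
 * offset of the first MSIR register from the block base.
 */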
struct ls_scfg_msi_cfg {
        u32 ibs_shift;
        u32 msir_irqs;
        u32 msir_base;
};

struct ls_scfg_msir {
        struct ls_scfg_msi *msi_data;
        unsigned int index;
        unsigned int gic_irq;
        unsigned int bit_start;
        unsigned int bit_end;
        unsigned int srs;
        void __iomem *reg;
};

struct ls_scfg_msi {
        spinlock_t lock;
        struct platform_device *pdev;
        struct irq_domain *parent;
        struct irq_domain *msi_domain;
        void __iomem *regs;
        phys_addr_t msiir_addr;
        struct ls_scfg_msi_cfg *cfg;
        u32 msir_num;
        struct ls_scfg_msir *msir;
        u32 irqs_num;
        unsigned long *used;
};

static struct irq_chip ls_scfg_msi_irq_chip = {
        .name = "MSI",
        .irq_mask = pci_msi_mask_irq,
        .irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info ls_scfg_msi_domain_info = {
        .flags = (MSI_FLAG_USE_DEF_DOM_OPS |
                  MSI_FLAG_USE_DEF_CHIP_OPS |
                  MSI_FLAG_PCI_MSIX),
        .chip = &ls_scfg_msi_irq_chip,
};

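/*
 * Affinity mode is on by default: each MSIR is bound to one CPU and the
 * low bits of the MSI data steer the interrupt to that CPU's MSIR.
 * Booting with "lsmsi=no-affinity" on the kernel command line disables
 * this and lets every MSIR demultiplex its own slice of hwirqs instead.
 */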
static int msi_affinity_flag = 1;

static int __init early_parse_ls_scfg_msi(char *p)
{
        if (p && strncmp(p, "no-affinity", 11) == 0)
                msi_affinity_flag = 0;
        else
                msi_affinity_flag = 1;

        return 0;
}
early_param("lsmsi", early_parse_ls_scfg_msi);

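/*
 * Compose the MSI message: the write address is the MSIIR register of
 * this SCFG block and the data is the hwirq. In affinity mode the low
 * bits of the data additionally select the MSIR of the target CPU.
 */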
static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
        struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);

        msg->address_hi = upper_32_bits(msi_data->msiir_addr);
        msg->address_lo = lower_32_bits(msi_data->msiir_addr);
        msg->data = data->hwirq;

        if (msi_affinity_flag) {
                const struct cpumask *mask;

                mask = irq_data_get_effective_affinity_mask(data);
                msg->data |= cpumask_first(mask);
        }

        iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}

static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
                                    const struct cpumask *mask, bool force)
{
        struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
        u32 cpu;

        if (!msi_affinity_flag)
                return -EINVAL;

        if (!force)
                cpu = cpumask_any_and(mask, cpu_online_mask);
        else
                cpu = cpumask_first(mask);

        if (cpu >= msi_data->msir_num)
                return -EINVAL;

        if (msi_data->msir[cpu].gic_irq <= 0) {
                pr_warn("cannot bind the irq to cpu%d\n", cpu);
                return -EINVAL;
        }

        irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));

        return IRQ_SET_MASK_OK;
}

static struct irq_chip ls_scfg_msi_parent_chip = {
        .name = "SCFG",
        .irq_compose_msi_msg = ls_scfg_msi_compose_msg,
        .irq_set_affinity = ls_scfg_msi_set_affinity,
};

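/*
 * Parent domain ops: hwirqs are handed out from the 'used' bitmap, one
 * MSI vector per allocation, and returned to the bitmap on free.
 */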
static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
                                        unsigned int virq,
                                        unsigned int nr_irqs,
                                        void *args)
{
        msi_alloc_info_t *info = args;
        struct ls_scfg_msi *msi_data = domain->host_data;
        int pos, err = 0;

        WARN_ON(nr_irqs != 1);

        spin_lock(&msi_data->lock);
        pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
        if (pos < msi_data->irqs_num)
                __set_bit(pos, msi_data->used);
        else
                err = -ENOSPC;
        spin_unlock(&msi_data->lock);

        if (err)
                return err;

        err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr);
        if (err)
                return err;

        irq_domain_set_info(domain, virq, pos,
                            &ls_scfg_msi_parent_chip, msi_data,
                            handle_simple_irq, NULL, NULL);

        return 0;
}

static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
                                        unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
        int pos;

        pos = d->hwirq;
        if (pos < 0 || pos >= msi_data->irqs_num) {
                pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
                return;
        }

        spin_lock(&msi_data->lock);
        __clear_bit(pos, msi_data->used);
        spin_unlock(&msi_data->lock);
}

static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
        .alloc = ls_scfg_msi_domain_irq_alloc,
        .free = ls_scfg_msi_domain_irq_free,
};

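/*
 * Chained handler for one MSIR: read the (big-endian) register and turn
 * every set bit back into a hwirq, encoded as the bit position counted
 * from bit_end, shifted by ibs_shift and OR'ed with this MSIR's SRS.
 */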
static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{
        struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
        struct ls_scfg_msi *msi_data = msir->msi_data;
        unsigned long val;
        int pos, size, hwirq;

        chained_irq_enter(irq_desc_get_chip(desc), desc);

        val = ioread32be(msir->reg);

        pos = msir->bit_start;
        size = msir->bit_end + 1;

        for_each_set_bit_from(pos, &val, size) {
                hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
                        msir->srs;
                generic_handle_domain_irq(msi_data->parent, hwirq);
        }

        chained_irq_exit(irq_desc_get_chip(desc), desc);
}

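/*
 * Two-level domain setup: a linear parent domain that owns the hwirqs,
 * with a PCI/MSI domain stacked on top of it.
 */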
static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{
        /* Initialize MSI domain parent */
        msi_data->parent = irq_domain_add_linear(NULL,
                                                 msi_data->irqs_num,
                                                 &ls_scfg_msi_domain_ops,
                                                 msi_data);
        if (!msi_data->parent) {
                dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
                return -ENOMEM;
        }

        msi_data->msi_domain = pci_msi_create_irq_domain(
                                of_node_to_fwnode(msi_data->pdev->dev.of_node),
                                &ls_scfg_msi_domain_info,
                                msi_data->parent);
        if (!msi_data->msi_domain) {
                dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
                irq_domain_remove(msi_data->parent);
                return -ENOMEM;
        }

        return 0;
}

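/*
 * Wire up one MSIR: install the chained handler on its GIC interrupt,
 * work out which bits of the register it uses (LS1043A v1.1 MSIRs carry
 * only 8 interrupts each), pick its SRS, and release the hwirqs it
 * serves back into the allocation bitmap.
 */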
static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
{
        struct ls_scfg_msir *msir;
        int virq, i, hwirq;

        virq = platform_get_irq(msi_data->pdev, index);
        if (virq <= 0)
                return -ENODEV;

        msir = &msi_data->msir[index];
        msir->index = index;
        msir->msi_data = msi_data;
        msir->gic_irq = virq;
        msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;

        if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
                msir->bit_start = 32 - ((msir->index + 1) *
                                        MSI_LS1043V1_1_IRQS_PER_MSIR);
                msir->bit_end = msir->bit_start +
                                MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
        } else {
                msir->bit_start = 0;
                msir->bit_end = msi_data->cfg->msir_irqs - 1;
        }

        irq_set_chained_handler_and_data(msir->gic_irq,
                                         ls_scfg_msi_irq_handler,
                                         msir);

        if (msi_affinity_flag) {
                /* Associate the MSIR interrupt with the CPU */
                irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
                msir->srs = 0; /* This value is determined by the CPU */
        } else
                msir->srs = index;

        /* Release the hwirqs corresponding to this MSIR */
        if (!msi_affinity_flag || msir->index == 0) {
                for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
                        hwirq = i << msi_data->cfg->ibs_shift | msir->index;
                        bitmap_clear(msi_data->used, hwirq, 1);
                }
        }

        return 0;
}

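/*
 * Undo ls_scfg_msi_setup_hwirq(): detach the chained handler and mark
 * this MSIR's hwirqs as used so they can no longer be allocated.
 */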
static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
{
        struct ls_scfg_msi *msi_data = msir->msi_data;
        int i, hwirq;

        if (msir->gic_irq > 0)
                irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);

        for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
                hwirq = i << msi_data->cfg->ibs_shift | msir->index;
                bitmap_set(msi_data->used, hwirq, 1);
        }

        return 0;
}

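/*
 * Per-SoC MSI block configurations; the probe routine sizes the hwirq
 * space as MSI_IRQS_PER_MSIR << ibs_shift entries.
 */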
static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
        .ibs_shift = 3,
        .msir_irqs = MSI_IRQS_PER_MSIR,
        .msir_base = MSI_MSIR_OFFSET,
};

static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
        .ibs_shift = 2,
        .msir_irqs = MSI_IRQS_PER_MSIR,
        .msir_base = MSI_MSIR_OFFSET,
};

static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
        .ibs_shift = 2,
        .msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
        .msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
};

static const struct of_device_id ls_scfg_msi_id[] = {
        /* The following two misspelled compatibles are obsolete */
        { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
        { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},

        { .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
        { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
        { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
        { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
        { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
        {},
};
MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);

static int ls_scfg_msi_probe(struct platform_device *pdev)
{
        const struct of_device_id *match;
        struct ls_scfg_msi *msi_data;
        struct resource *res;
        int i, ret;

        match = of_match_device(ls_scfg_msi_id, &pdev->dev);
        if (!match)
                return -ENODEV;

        msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
        if (!msi_data)
                return -ENOMEM;

        msi_data->cfg = (struct ls_scfg_msi_cfg *)match->data;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(msi_data->regs)) {
                dev_err(&pdev->dev, "failed to initialize 'regs'\n");
                return PTR_ERR(msi_data->regs);
        }
        msi_data->msiir_addr = res->start;

        msi_data->pdev = pdev;
        spin_lock_init(&msi_data->lock);

        msi_data->irqs_num = MSI_IRQS_PER_MSIR *
                             (1 << msi_data->cfg->ibs_shift);
        msi_data->used = devm_bitmap_zalloc(&pdev->dev, msi_data->irqs_num,
                                            GFP_KERNEL);
        if (!msi_data->used)
                return -ENOMEM;

        /*
         * Reserve all the hwirqs; the available ones are released in
         * ls_scfg_msi_setup_hwirq().
         */
        bitmap_set(msi_data->used, 0, msi_data->irqs_num);

        msi_data->msir_num = of_irq_count(pdev->dev.of_node);

        if (msi_affinity_flag) {
                u32 cpu_num;

                cpu_num = num_possible_cpus();
                if (msi_data->msir_num >= cpu_num)
                        msi_data->msir_num = cpu_num;
                else
                        msi_affinity_flag = 0;
        }

        msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
                                      sizeof(*msi_data->msir),
                                      GFP_KERNEL);
        if (!msi_data->msir)
                return -ENOMEM;

        for (i = 0; i < msi_data->msir_num; i++)
                ls_scfg_msi_setup_hwirq(msi_data, i);

        ret = ls_scfg_msi_domains_init(msi_data);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, msi_data);

        return 0;
}

static int ls_scfg_msi_remove(struct platform_device *pdev)
{
        struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < msi_data->msir_num; i++)
                ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);

        irq_domain_remove(msi_data->msi_domain);
        irq_domain_remove(msi_data->parent);

        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver ls_scfg_msi_driver = {
        .driver = {
                .name = "ls-scfg-msi",
                .of_match_table = ls_scfg_msi_id,
        },
        .probe = ls_scfg_msi_probe,
        .remove = ls_scfg_msi_remove,
};

module_platform_driver(ls_scfg_msi_driver);

MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
MODULE_LICENSE("GPL v2");