0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/module.h>
0013 #include <linux/init.h>
0014 #include <linux/irq.h>
0015 #include <linux/irqchip.h>
0016 #include <linux/irqchip/chained_irq.h>
0017 #include <linux/irqdomain.h>
0018 #include <linux/io.h>
0019 #include <linux/ioport.h>
0020 #include <linux/of_address.h>
0021 #include <linux/of_irq.h>
0022
0023 #include <asm/exception.h>
0024 #include <asm/hardirq.h>
0025
/* One main ICU plus up to fifteen cascaded mux ICUs. */
#define MAX_ICU_NR 16

/* "Interrupt select" register offset on PJ1- and PJ4-based SoCs. */
#define PJ1_INT_SEL 0x10c
#define PJ4_INT_SEL 0x104


/* INT_SEL layout: bit 6 flags a pending interrupt, bits 5:0 its number. */
#define SEL_INT_PENDING (1 << 6)
#define SEL_INT_NUM_MASK 0x3f

/* Per-source conf bits routing an interrupt to the PJ4 as IRQ or FIQ. */
#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)

/*
 * Per-ICU state.  icu_data[0] is the main ICU, whose per-source conf
 * registers live at mmp_icu_base; higher entries describe secondary
 * (mux) ICUs reached through reg_status/reg_mask and cascaded off a
 * main-ICU interrupt (cascade_irq).
 */
struct icu_chip_data {
	int nr_irqs;			/* number of sources on this ICU */
	unsigned int virq_base;		/* first Linux virq of this ICU */
	unsigned int cascade_irq;	/* parent irq a mux ICU hangs off */
	void __iomem *reg_status;	/* mux: pending-status register */
	void __iomem *reg_mask;		/* mux: mask register (1 = masked) */
	unsigned int conf_enable;	/* main: conf bits enabling a source */
	unsigned int conf_disable;	/* main: conf bits disabling a source */
	unsigned int conf_mask;		/* main: conf bits owned by us */
	unsigned int conf2_mask;	/* main: ditto in the second bank */
	unsigned int clr_mfp_irq_base;	/* virq base of ICU with MFP clear */
	unsigned int clr_mfp_hwirq;	/* hwirq needing mmp2_clear_pmic_int */
	struct irq_domain *domain;
};

/* Conf-register bits per SoC variant (copied into icu_data[0] at init). */
struct mmp_intc_conf {
	unsigned int conf_enable;
	unsigned int conf_disable;
	unsigned int conf_mask;
	unsigned int conf2_mask;
};

static void __iomem *mmp_icu_base;	/* main ICU register base */
static void __iomem *mmp_icu2_base;	/* second conf bank (MMP3 only) */
static struct icu_chip_data icu_data[MAX_ICU_NR];
static int max_icu_nr;			/* icu_data[] slots in use */

/* Provided by MMP2 platform code: clears the PMIC interrupt source. */
extern void mmp2_clear_pmic_int(void);
0066
/*
 * Mask and acknowledge an interrupt.
 *
 * Main ICU (icu_data[0]): rewrite the per-source conf register with the
 * enable/route bits cleared; there is no separate ack register.
 * Mux ICU: set the source's bit in the mask register.  On MMP2 the
 * PMIC interrupt additionally needs a clear in the MFP block before it
 * deasserts, hence the mmp2_clear_pmic_int() hook.
 */
static void icu_mask_ack_irq(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
	int hwirq;
	u32 r;

	/* Mappings are contiguous from virq_base, so hwirq = virq - base. */
	hwirq = d->irq - data->virq_base;
	if (data == &icu_data[0]) {
		/* Conf registers are one 32-bit word per source. */
		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
		r &= ~data->conf_mask;
		r |= data->conf_disable;
		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
	} else {
#ifdef CONFIG_CPU_MMP2
		/* The PMIC interrupt must also be cleared in the MFP block. */
		if ((data->virq_base == data->clr_mfp_irq_base)
			&& (hwirq == data->clr_mfp_hwirq))
			mmp2_clear_pmic_int();
#endif
		/* Setting the bit masks the source on a mux ICU. */
		r = readl_relaxed(data->reg_mask) | (1 << hwirq);
		writel_relaxed(r, data->reg_mask);
	}
}
0090
/*
 * Mask an interrupt source.
 *
 * Main ICU (icu_data[0]): clear the enable/route bits in the per-source
 * conf register, and when a second conf bank exists (conf2_mask set,
 * MMP3) clear the matching bits there too.
 * Mux ICU: set the source's bit in the mask register.
 */
static void icu_mask_irq(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
	int hwirq;
	u32 r;

	hwirq = d->irq - data->virq_base;
	if (data == &icu_data[0]) {
		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
		r &= ~data->conf_mask;
		r |= data->conf_disable;
		writel_relaxed(r, mmp_icu_base + (hwirq << 2));

		if (data->conf2_mask) {
			/*
			 * The second ICU bank mirrors the per-source conf
			 * registers; clear its enable bits as well.
			 * NOTE(review): presumably this bank routes the
			 * source to the other MMP3 cores -- confirm
			 * against the SoC manual.
			 */
			r = readl_relaxed(mmp_icu2_base + (hwirq << 2));
			r &= ~data->conf2_mask;
			writel_relaxed(r, mmp_icu2_base + (hwirq << 2));
		}
	} else {
		r = readl_relaxed(data->reg_mask) | (1 << hwirq);
		writel_relaxed(r, data->reg_mask);
	}
}
0119
0120 static void icu_unmask_irq(struct irq_data *d)
0121 {
0122 struct irq_domain *domain = d->domain;
0123 struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
0124 int hwirq;
0125 u32 r;
0126
0127 hwirq = d->irq - data->virq_base;
0128 if (data == &icu_data[0]) {
0129 r = readl_relaxed(mmp_icu_base + (hwirq << 2));
0130 r &= ~data->conf_mask;
0131 r |= data->conf_enable;
0132 writel_relaxed(r, mmp_icu_base + (hwirq << 2));
0133 } else {
0134 r = readl_relaxed(data->reg_mask) & ~(1 << hwirq);
0135 writel_relaxed(r, data->reg_mask);
0136 }
0137 }
0138
/*
 * irq_chip shared by the main ICU and all mux ICUs; the per-domain
 * host_data (struct icu_chip_data) tells the callbacks which one.
 */
struct irq_chip icu_irq_chip = {
	.name = "icu_irq",
	.irq_mask = icu_mask_irq,
	.irq_mask_ack = icu_mask_ack_irq,
	.irq_unmask = icu_unmask_irq,
};
0145
/*
 * Chained handler for interrupts cascaded through a secondary (mux)
 * ICU: locate the mux ICU owning this cascade irq, then dispatch every
 * pending, unmasked source until the status register reads clean.
 */
static void icu_mux_irq_demux(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_domain *domain;
	struct icu_chip_data *data;
	int i;
	unsigned long mask, status, n;

	chained_irq_enter(chip, desc);

	/* icu_data[0] is the main ICU; mux ICUs start at index 1. */
	for (i = 1; i < max_icu_nr; i++) {
		if (irq == icu_data[i].cascade_irq) {
			domain = icu_data[i].domain;
			data = (struct icu_chip_data *)domain->host_data;
			break;
		}
	}
	if (i >= max_icu_nr) {
		pr_err("Spurious irq %d in MMP INTC\n", irq);
		goto out;
	}

	mask = readl_relaxed(data->reg_mask);
	while (1) {
		/* Only dispatch sources that are pending and not masked. */
		status = readl_relaxed(data->reg_status) & ~mask;
		if (status == 0)
			break;
		for_each_set_bit(n, &status, BITS_PER_LONG) {
			generic_handle_irq(icu_data[i].virq_base + n);
		}
	}

out:
	chained_irq_exit(chip, desc);
}
0182
/* Domain .map: give every hwirq the shared ICU chip and level handler. */
static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
	return 0;
}
0189
/*
 * Domain .xlate: the DT specifier is a single cell holding the hwirq.
 * NOTE(review): *out_type is left untouched, so no trigger type is
 * reported to the core -- confirm this is intentional.
 */
static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	*out_hwirq = intspec[0];
	return 0;
}
0198
/* Domain ops for the DT-probed (linear) domains. */
static const struct irq_domain_ops mmp_irq_domain_ops = {
	.map = mmp_irq_domain_map,
	.xlate = mmp_irq_domain_xlate,
};
0203
/* Conf-register bits for PJ1-based SoCs (single conf bank). */
static const struct mmp_intc_conf mmp_conf = {
	.conf_enable = 0x51,
	.conf_disable = 0x0,
	.conf_mask = 0x7f,
};

/* Conf-register bits for MMP2: sources route to the PJ4 as IRQ/FIQ. */
static const struct mmp_intc_conf mmp2_conf = {
	.conf_enable = 0x20,
	.conf_disable = 0x0,
	.conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
		     MMP2_ICU_INT_ROUTE_PJ4_FIQ,
};
0216
0217 static struct mmp_intc_conf mmp3_conf = {
0218 .conf_enable = 0x20,
0219 .conf_disable = 0x0,
0220 .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
0221 MMP2_ICU_INT_ROUTE_PJ4_FIQ,
0222 .conf2_mask = 0xf0,
0223 };
0224
0225 static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
0226 {
0227 int hwirq;
0228
0229 hwirq = readl_relaxed(mmp_icu_base + PJ1_INT_SEL);
0230 if (!(hwirq & SEL_INT_PENDING))
0231 return;
0232 hwirq &= SEL_INT_NUM_MASK;
0233 generic_handle_domain_irq(icu_data[0].domain, hwirq);
0234 }
0235
0236 static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
0237 {
0238 int hwirq;
0239
0240 hwirq = readl_relaxed(mmp_icu_base + PJ4_INT_SEL);
0241 if (!(hwirq & SEL_INT_PENDING))
0242 return;
0243 hwirq &= SEL_INT_NUM_MASK;
0244 generic_handle_domain_irq(icu_data[0].domain, hwirq);
0245 }
0246
0247
/*
 * Legacy (non-DT) init for PJ1-based SoCs: map the ICU at its fixed
 * physical address, create a 64-source legacy domain starting at
 * virq 0, mask every source and install the top-level entry handler.
 */
void __init icu_init_irq(void)
{
	int irq;

	max_icu_nr = 1;
	/* Fixed physical address of the ICU register block. */
	mmp_icu_base = ioremap(0xd4282000, 0x1000);
	icu_data[0].conf_enable = mmp_conf.conf_enable;
	icu_data[0].conf_disable = mmp_conf.conf_disable;
	icu_data[0].conf_mask = mmp_conf.conf_mask;
	icu_data[0].nr_irqs = 64;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
						   &irq_domain_simple_ops,
						   &icu_data[0]);
	for (irq = 0; irq < 64; irq++) {
		/* Start with every source masked. */
		icu_mask_irq(irq_get_irq_data(irq));
		irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
	}
	irq_set_default_host(icu_data[0].domain);
	set_handle_irq(mmp_handle_irq);
}
0269
0270
0271 void __init mmp2_init_icu(void)
0272 {
0273 int irq, end;
0274
0275 max_icu_nr = 8;
0276 mmp_icu_base = ioremap(0xd4282000, 0x1000);
0277 icu_data[0].conf_enable = mmp2_conf.conf_enable;
0278 icu_data[0].conf_disable = mmp2_conf.conf_disable;
0279 icu_data[0].conf_mask = mmp2_conf.conf_mask;
0280 icu_data[0].nr_irqs = 64;
0281 icu_data[0].virq_base = 0;
0282 icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
0283 &irq_domain_simple_ops,
0284 &icu_data[0]);
0285 icu_data[1].reg_status = mmp_icu_base + 0x150;
0286 icu_data[1].reg_mask = mmp_icu_base + 0x168;
0287 icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base +
0288 icu_data[0].nr_irqs;
0289 icu_data[1].clr_mfp_hwirq = 1;
0290 icu_data[1].nr_irqs = 2;
0291 icu_data[1].cascade_irq = 4;
0292 icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs;
0293 icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
0294 icu_data[1].virq_base, 0,
0295 &irq_domain_simple_ops,
0296 &icu_data[1]);
0297 icu_data[2].reg_status = mmp_icu_base + 0x154;
0298 icu_data[2].reg_mask = mmp_icu_base + 0x16c;
0299 icu_data[2].nr_irqs = 2;
0300 icu_data[2].cascade_irq = 5;
0301 icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs;
0302 icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
0303 icu_data[2].virq_base, 0,
0304 &irq_domain_simple_ops,
0305 &icu_data[2]);
0306 icu_data[3].reg_status = mmp_icu_base + 0x180;
0307 icu_data[3].reg_mask = mmp_icu_base + 0x17c;
0308 icu_data[3].nr_irqs = 3;
0309 icu_data[3].cascade_irq = 9;
0310 icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs;
0311 icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
0312 icu_data[3].virq_base, 0,
0313 &irq_domain_simple_ops,
0314 &icu_data[3]);
0315 icu_data[4].reg_status = mmp_icu_base + 0x158;
0316 icu_data[4].reg_mask = mmp_icu_base + 0x170;
0317 icu_data[4].nr_irqs = 5;
0318 icu_data[4].cascade_irq = 17;
0319 icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs;
0320 icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
0321 icu_data[4].virq_base, 0,
0322 &irq_domain_simple_ops,
0323 &icu_data[4]);
0324 icu_data[5].reg_status = mmp_icu_base + 0x15c;
0325 icu_data[5].reg_mask = mmp_icu_base + 0x174;
0326 icu_data[5].nr_irqs = 15;
0327 icu_data[5].cascade_irq = 35;
0328 icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs;
0329 icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
0330 icu_data[5].virq_base, 0,
0331 &irq_domain_simple_ops,
0332 &icu_data[5]);
0333 icu_data[6].reg_status = mmp_icu_base + 0x160;
0334 icu_data[6].reg_mask = mmp_icu_base + 0x178;
0335 icu_data[6].nr_irqs = 2;
0336 icu_data[6].cascade_irq = 51;
0337 icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs;
0338 icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
0339 icu_data[6].virq_base, 0,
0340 &irq_domain_simple_ops,
0341 &icu_data[6]);
0342 icu_data[7].reg_status = mmp_icu_base + 0x188;
0343 icu_data[7].reg_mask = mmp_icu_base + 0x184;
0344 icu_data[7].nr_irqs = 2;
0345 icu_data[7].cascade_irq = 55;
0346 icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs;
0347 icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
0348 icu_data[7].virq_base, 0,
0349 &irq_domain_simple_ops,
0350 &icu_data[7]);
0351 end = icu_data[7].virq_base + icu_data[7].nr_irqs;
0352 for (irq = 0; irq < end; irq++) {
0353 icu_mask_irq(irq_get_irq_data(irq));
0354 if (irq == icu_data[1].cascade_irq ||
0355 irq == icu_data[2].cascade_irq ||
0356 irq == icu_data[3].cascade_irq ||
0357 irq == icu_data[4].cascade_irq ||
0358 irq == icu_data[5].cascade_irq ||
0359 irq == icu_data[6].cascade_irq ||
0360 irq == icu_data[7].cascade_irq) {
0361 irq_set_chip(irq, &icu_irq_chip);
0362 irq_set_chained_handler(irq, icu_mux_irq_demux);
0363 } else {
0364 irq_set_chip_and_handler(irq, &icu_irq_chip,
0365 handle_level_irq);
0366 }
0367 }
0368 irq_set_default_host(icu_data[0].domain);
0369 set_handle_irq(mmp2_handle_irq);
0370 }
0371
0372 #ifdef CONFIG_OF
0373 static int __init mmp_init_bases(struct device_node *node)
0374 {
0375 int ret, nr_irqs, irq, i = 0;
0376
0377 ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
0378 if (ret) {
0379 pr_err("Not found mrvl,intc-nr-irqs property\n");
0380 return ret;
0381 }
0382
0383 mmp_icu_base = of_iomap(node, 0);
0384 if (!mmp_icu_base) {
0385 pr_err("Failed to get interrupt controller register\n");
0386 return -ENOMEM;
0387 }
0388
0389 icu_data[0].virq_base = 0;
0390 icu_data[0].domain = irq_domain_add_linear(node, nr_irqs,
0391 &mmp_irq_domain_ops,
0392 &icu_data[0]);
0393 for (irq = 0; irq < nr_irqs; irq++) {
0394 ret = irq_create_mapping(icu_data[0].domain, irq);
0395 if (!ret) {
0396 pr_err("Failed to mapping hwirq\n");
0397 goto err;
0398 }
0399 if (!irq)
0400 icu_data[0].virq_base = ret;
0401 }
0402 icu_data[0].nr_irqs = nr_irqs;
0403 return 0;
0404 err:
0405 if (icu_data[0].virq_base) {
0406 for (i = 0; i < irq; i++)
0407 irq_dispose_mapping(icu_data[0].virq_base + i);
0408 }
0409 irq_domain_remove(icu_data[0].domain);
0410 iounmap(mmp_icu_base);
0411 return -EINVAL;
0412 }
0413
/*
 * DT probe for "mrvl,mmp-intc": main ICU only, PJ1-style conf bits
 * and top-level handler.
 */
static int __init mmp_of_init(struct device_node *node,
			      struct device_node *parent)
{
	int ret;

	ret = mmp_init_bases(node);
	if (ret < 0)
		return ret;

	icu_data[0].conf_enable = mmp_conf.conf_enable;
	icu_data[0].conf_disable = mmp_conf.conf_disable;
	icu_data[0].conf_mask = mmp_conf.conf_mask;
	set_handle_irq(mmp_handle_irq);
	max_icu_nr = 1;
	return 0;
}
IRQCHIP_DECLARE(mmp_intc, "mrvl,mmp-intc", mmp_of_init);
0431
/*
 * DT probe for "mrvl,mmp2-intc": main ICU only, PJ4 routing bits and
 * the PJ4 top-level handler.
 */
static int __init mmp2_of_init(struct device_node *node,
			       struct device_node *parent)
{
	int ret;

	ret = mmp_init_bases(node);
	if (ret < 0)
		return ret;

	icu_data[0].conf_enable = mmp2_conf.conf_enable;
	icu_data[0].conf_disable = mmp2_conf.conf_disable;
	icu_data[0].conf_mask = mmp2_conf.conf_mask;
	set_handle_irq(mmp2_handle_irq);
	max_icu_nr = 1;
	return 0;
}
IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init);
0449
/*
 * DT probe for "marvell,mmp3-intc": like MMP2 but with a second conf
 * register bank (resource #1), handled via conf2_mask on mask.
 */
static int __init mmp3_of_init(struct device_node *node,
			       struct device_node *parent)
{
	int ret;

	mmp_icu2_base = of_iomap(node, 1);
	if (!mmp_icu2_base) {
		pr_err("Failed to get interrupt controller register #2\n");
		return -ENODEV;
	}

	ret = mmp_init_bases(node);
	if (ret < 0) {
		iounmap(mmp_icu2_base);
		return ret;
	}

	icu_data[0].conf_enable = mmp3_conf.conf_enable;
	icu_data[0].conf_disable = mmp3_conf.conf_disable;
	icu_data[0].conf_mask = mmp3_conf.conf_mask;
	icu_data[0].conf2_mask = mmp3_conf.conf2_mask;

	if (!parent) {
		/*
		 * Only install the CPU exception-entry handler when we
		 * are the root interrupt controller; with a parent in
		 * the DT, that parent presumably owns the entry --
		 * TODO confirm the cascaded delivery path.
		 */
		set_handle_irq(mmp2_handle_irq);
	}

	max_icu_nr = 1;
	return 0;
}
IRQCHIP_DECLARE(mmp3_intc, "marvell,mmp3-intc", mmp3_of_init);
0481
/*
 * DT probe for "mrvl,mmp2-mux-intc": one secondary (mux) ICU cascaded
 * off the main ICU.  Claims the next free icu_data[] slot; max_icu_nr
 * is only bumped once setup has fully succeeded.
 */
static int __init mmp2_mux_of_init(struct device_node *node,
				   struct device_node *parent)
{
	int i, ret, irq, j = 0;
	u32 nr_irqs, mfp_irq;
	u32 reg[4];

	/* A mux ICU only makes sense cascaded under the main ICU. */
	if (!parent)
		return -ENODEV;

	i = max_icu_nr;		/* next free icu_data[] slot */
	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
				   &nr_irqs);
	if (ret) {
		pr_err("Not found mrvl,intc-nr-irqs property\n");
		return -EINVAL;
	}

	/*
	 * "reg" carries two cell pairs relative to the main ICU base:
	 * the status register first, then the mask register.  Only
	 * cells 0 and 2 (the offsets) are used.
	 */
	ret = of_property_read_variable_u32_array(node, "reg", reg,
						  ARRAY_SIZE(reg),
						  ARRAY_SIZE(reg));
	if (ret < 0) {
		pr_err("Not found reg property\n");
		return -EINVAL;
	}
	icu_data[i].reg_status = mmp_icu_base + reg[0];
	icu_data[i].reg_mask = mmp_icu_base + reg[2];
	icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
	if (!icu_data[i].cascade_irq)
		return -EINVAL;

	icu_data[i].virq_base = 0;
	icu_data[i].domain = irq_domain_add_linear(node, nr_irqs,
						   &mmp_irq_domain_ops,
						   &icu_data[i]);
	for (irq = 0; irq < nr_irqs; irq++) {
		ret = irq_create_mapping(icu_data[i].domain, irq);
		if (!ret) {
			pr_err("Failed to mapping hwirq\n");
			goto err;
		}
		/* Remember the virq that backs hwirq 0. */
		if (!irq)
			icu_data[i].virq_base = ret;
	}
	icu_data[i].nr_irqs = nr_irqs;
	/* Optional hwirq whose ack needs the extra MFP/PMIC clear. */
	if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
				  &mfp_irq)) {
		icu_data[i].clr_mfp_irq_base = icu_data[i].virq_base;
		icu_data[i].clr_mfp_hwirq = mfp_irq;
	}
	irq_set_chained_handler(icu_data[i].cascade_irq,
				icu_mux_irq_demux);
	max_icu_nr++;
	return 0;
err:
	/*
	 * NOTE(review): cleanup assumes the virqs created above are
	 * contiguous from virq_base -- confirm before relying on it.
	 */
	if (icu_data[i].virq_base) {
		for (j = 0; j < irq; j++)
			irq_dispose_mapping(icu_data[i].virq_base + j);
	}
	irq_domain_remove(icu_data[i].domain);
	return -EINVAL;
}
IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init);
#endif /* CONFIG_OF */