0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/kernel.h>
0011 #include <linux/init.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/irq.h>
0014 #include <linux/spinlock.h>
0015 #include <asm/irq_cpu.h>
0016 #include <asm/mipsregs.h>
0017 #include <bcm63xx_cpu.h>
0018 #include <bcm63xx_regs.h>
0019 #include <bcm63xx_io.h>
0020 #include <bcm63xx_irq.h>
0021
0022
/* Serializes access to the internal (ipic) controller's stat/mask regs. */
static DEFINE_SPINLOCK(ipic_lock);
/* Serializes access to the external (epic) controller's config regs. */
static DEFINE_SPINLOCK(epic_lock);

/* Per-cpu MMIO addresses of the internal irq status/mask registers.
 * A zero entry means the SoC has no register set for that cpu (see
 * bcm63xx_init_irq() and the for_each_present_cpu() early break). */
static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
/* Width-specific (32/64 bit) handlers, selected in bcm63xx_init_irq()
 * from the detected SoC. */
static void (*dispatch_internal)(int cpu);
/* Non-zero when the external irq lines are cascaded through the
 * internal controller instead of dedicated CPU irq lines. */
static int is_ext_irq_cascaded;
/* Number of external irq lines on this SoC. */
static unsigned int ext_irq_count;
/* When cascaded: first/last internal-controller bit backing an ext irq. */
static unsigned int ext_irq_start, ext_irq_end;
/* Perf-block register offsets holding external irq configuration;
 * reg2 is only set on SoCs with more than 4 external irqs (6368). */
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);
0035
0036
0037 static inline u32 get_ext_irq_perf_reg(int irq)
0038 {
0039 if (irq < 4)
0040 return ext_irq_cfg_reg1;
0041 return ext_irq_cfg_reg2;
0042 }
0043
0044 static inline void handle_internal(int intbit)
0045 {
0046 if (is_ext_irq_cascaded &&
0047 intbit >= ext_irq_start && intbit <= ext_irq_end)
0048 do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
0049 else
0050 do_IRQ(intbit + IRQ_INTERNAL_BASE);
0051 }
0052
/*
 * Decide whether an irq should be enabled on @cpu. An offline cpu never
 * gets the irq. On SMP the explicit mask @m (if given) wins; otherwise a
 * previously configured affinity mask is honoured; with neither, any
 * online cpu qualifies.
 */
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
				     const struct cpumask *m)
{
	if (!cpu_online(cpu))
		return 0;

#ifdef CONFIG_SMP
	if (m)
		return cpumask_test_cpu(cpu, m);
	if (irqd_affinity_was_set(d))
		return cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
#endif
	return 1;
}
0066
0067
0068
0069
0070
0071
0072
0073
/*
 * BUILD_IPIC_INTERNAL(width) expands to the three width-specific
 * (32- or 64-bit controller) helpers:
 *
 *  __dispatch_internal_<width>(cpu)  - read the pending-and-unmasked
 *	bits for @cpu and deliver exactly ONE of them, round-robin via
 *	a static per-cpu cursor so no source can starve the others.
 *  __internal_irq_mask_<width>(d)    - clear the enable bit on every
 *	present cpu that has a register set.
 *  __internal_irq_unmask_<width>(d, m) - set or clear the enable bit
 *	per cpu according to enable_irq_for_cpu().
 *
 * Register words are stored into pending[] in reverse order (pending[--tgt])
 * and mask/unmask XOR the word index ((irq / 32) ^ (width/32 - 1)): on the
 * 64-bit controllers the high word holds the low irq numbers.
 */
#define BUILD_IPIC_INTERNAL(width)					\
void __dispatch_internal_##width(int cpu)				\
{									\
	u32 pending[width / 32];					\
	unsigned int src, tgt;						\
	bool irqs_pending = false;					\
	static unsigned int i[2];					\
	/* per-cpu round-robin cursor into the width bit positions */	\
	unsigned int *next = &i[cpu];					\
	unsigned long flags;						\
									\
	/* snapshot raised-and-enabled bits under the lock */		\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
		u32 val;						\
									\
		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
		pending[--tgt] = val;					\
									\
		if (val)						\
			irqs_pending = true;				\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
									\
	if (!irqs_pending)						\
		return;							\
									\
	/* deliver a single pending bit, starting after the last one */	\
	while (1) {							\
		unsigned int to_call = *next;				\
									\
		*next = (*next + 1) & (width - 1);			\
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
			handle_internal(to_call);			\
			break;						\
		}							\
	}								\
}									\
									\
static void __internal_irq_mask_##width(struct irq_data *d)		\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		/* cpus without a register set follow the boot cpu */	\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		val &= ~(1 << bit);					\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}									\
									\
static void __internal_irq_unmask_##width(struct irq_data *d,		\
					  const struct cpumask *m)	\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		/* enable only on cpus allowed by the affinity rules */	\
		if (enable_irq_for_cpu(cpu, d, m))			\
			val |= (1 << bit);				\
		else							\
			val &= ~(1 << bit);				\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}

BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);
0160
/*
 * Top-level MIPS interrupt dispatcher: service every CP0 cause bit that
 * is both raised and unmasked, looping until none remain.
 */
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP0)
			do_IRQ(0);
		if (cause & CAUSEF_IP1)
			do_IRQ(1);
		/* IP2 cascades the internal controller (cpu #0 register set) */
		if (cause & CAUSEF_IP2)
			dispatch_internal(0);
		if (is_ext_irq_cascaded) {
			/* IP3 cascades the second (cpu #1) register set */
			if (cause & CAUSEF_IP3)
				dispatch_internal(1);
		} else {
			/* external irqs are wired straight to IP3..IP6 */
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}
0194
0195
0196
0197
0198
/* irq_chip .irq_mask hook: forward to the width-specific handler. */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d);
}
0203
/* irq_chip .irq_unmask hook: unmask with no explicit cpumask override. */
static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d, NULL);
}
0208
0209
0210
0211
0212
0213 static void bcm63xx_external_irq_mask(struct irq_data *d)
0214 {
0215 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
0216 u32 reg, regaddr;
0217 unsigned long flags;
0218
0219 regaddr = get_ext_irq_perf_reg(irq);
0220 spin_lock_irqsave(&epic_lock, flags);
0221 reg = bcm_perf_readl(regaddr);
0222
0223 if (BCMCPU_IS_6348())
0224 reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
0225 else
0226 reg &= ~EXTIRQ_CFG_MASK(irq % 4);
0227
0228 bcm_perf_writel(reg, regaddr);
0229 spin_unlock_irqrestore(&epic_lock, flags);
0230
0231 if (is_ext_irq_cascaded)
0232 internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
0233 }
0234
0235 static void bcm63xx_external_irq_unmask(struct irq_data *d)
0236 {
0237 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
0238 u32 reg, regaddr;
0239 unsigned long flags;
0240
0241 regaddr = get_ext_irq_perf_reg(irq);
0242 spin_lock_irqsave(&epic_lock, flags);
0243 reg = bcm_perf_readl(regaddr);
0244
0245 if (BCMCPU_IS_6348())
0246 reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
0247 else
0248 reg |= EXTIRQ_CFG_MASK(irq % 4);
0249
0250 bcm_perf_writel(reg, regaddr);
0251 spin_unlock_irqrestore(&epic_lock, flags);
0252
0253 if (is_ext_irq_cascaded)
0254 internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
0255 NULL);
0256 }
0257
0258 static void bcm63xx_external_irq_clear(struct irq_data *d)
0259 {
0260 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
0261 u32 reg, regaddr;
0262 unsigned long flags;
0263
0264 regaddr = get_ext_irq_perf_reg(irq);
0265 spin_lock_irqsave(&epic_lock, flags);
0266 reg = bcm_perf_readl(regaddr);
0267
0268 if (BCMCPU_IS_6348())
0269 reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
0270 else
0271 reg |= EXTIRQ_CFG_CLEAR(irq % 4);
0272
0273 bcm_perf_writel(reg, regaddr);
0274 spin_unlock_irqrestore(&epic_lock, flags);
0275 }
0276
/*
 * irq_chip .irq_set_type hook: program level/edge sensing for an
 * external irq. Translates the generic flow type into the three
 * per-line config bits (levelsense / sense / bothedge), with
 * SoC-specific bit positions for the 6348.
 *
 * Returns IRQ_SET_MASK_OK_NOCOPY on success, -EINVAL for an unsupported
 * trigger combination.
 */
static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;
	unsigned long flags;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	/* default trigger when the caller did not specify one */
	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		/* all three bits stay 0 */
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		pr_err("bogus flow type combination given !\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);
	/* each config register holds at most 4 lines */
	irq %= 4;

	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		/* 6348 has its own bit layout */
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;
	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	/* record the trigger and pick the matching flow handler */
	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}
0373
#ifdef CONFIG_SMP
/*
 * irq_chip .irq_set_affinity hook: reprogram the per-cpu enable bits
 * for the new mask. A disabled irq is left alone; it picks up the new
 * affinity on the next unmask.
 */
static int bcm63xx_internal_set_affinity(struct irq_data *data,
					 const struct cpumask *dest,
					 bool force)
{
	if (irqd_irq_disabled(data))
		return 0;

	internal_irq_unmask(data, dest);
	return 0;
}
#endif
0385
/* irq_chip for the internal (ipic) controller lines. */
static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};
0391
/* irq_chip for the external (epic) controller lines. */
static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};
0401
/*
 * Fill in the SoC-specific driver state: per-cpu stat/mask register
 * addresses, controller width (32 or 64 bits), external irq count and
 * cascading layout, and the external irq config register offsets.
 * Finally select the matching width-specific handler set.
 */
static void bcm63xx_init_irq(void)
{
	int irq_bits;

	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
		/* single-cpu register set only */
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		/* external irqs arrive through the internal controller */
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
		irq_bits = 64;
		/* only SoC with 6 external lines, hence a second cfg reg */
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

	/* bind the width-specific handlers generated by BUILD_IPIC_INTERNAL */
	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}
0511
/*
 * Arch entry point for irq setup: detect the SoC, register the MIPS cpu
 * irqs, install the internal/external irq_chips and claim the cascade
 * lines (IP2, plus IP3..IP6 or just IP3 depending on the cascade layout).
 */
void __init arch_init_irq(void)
{
	int i, irq;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	if (!is_ext_irq_cascaded) {
		/* external irqs own dedicated cpu lines IP3.. */
		for (i = 3; i < 3 + ext_irq_count; ++i) {
			irq = MIPS_CPU_IRQ_BASE + i;
			if (request_irq(irq, no_action, IRQF_NO_THREAD,
					"cascade_extirq", NULL)) {
				pr_err("Failed to request irq %d (cascade_extirq)\n",
				       irq);
			}
		}
	}

	/* IP2 always cascades the internal controller */
	irq = MIPS_CPU_IRQ_BASE + 2;
	if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip2", NULL))
		pr_err("Failed to request irq %d (cascade_ip2)\n", irq);
#ifdef CONFIG_SMP
	if (is_ext_irq_cascaded) {
		/* IP3 carries the second cpu's internal controller */
		irq = MIPS_CPU_IRQ_BASE + 3;
		if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip3",
				NULL))
			pr_err("Failed to request irq %d (cascade_ip3)\n", irq);
		bcm63xx_internal_irq_chip.irq_set_affinity =
			bcm63xx_internal_set_affinity;

		/* default all irqs to the boot cpu only */
		cpumask_clear(irq_default_affinity);
		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	}
#endif
}