0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #define pr_fmt(fmt) "irq-mips-gic: " fmt
0011
0012 #include <linux/bitfield.h>
0013 #include <linux/bitmap.h>
0014 #include <linux/clocksource.h>
0015 #include <linux/cpuhotplug.h>
0016 #include <linux/init.h>
0017 #include <linux/interrupt.h>
0018 #include <linux/irq.h>
0019 #include <linux/irqchip.h>
0020 #include <linux/irqdomain.h>
0021 #include <linux/of_address.h>
0022 #include <linux/percpu.h>
0023 #include <linux/sched.h>
0024 #include <linux/smp.h>
0025
0026 #include <asm/mips-cps.h>
0027 #include <asm/setup.h>
0028 #include <asm/traps.h>
0029
0030 #include <dt-bindings/interrupt-controller/mips-gic.h>
0031
/* Upper bound on GIC interrupts supported by this driver. */
#define GIC_MAX_INTRS 256
#define GIC_MAX_LONGS BITS_TO_LONGS(GIC_MAX_INTRS)

/* Offset between a GIC CPU pin number and the corresponding core IRQ. */
#define GIC_CPU_PIN_OFFSET 2

/* An interrupt mapped to pin X is delivered as vector X + 1 in EIC mode. */
#define GIC_PIN_TO_VEC_OFFSET 1

/*
 * Linear hwirq space layout: local interrupts occupy
 * [0, GIC_NUM_LOCAL_INTRS), shared interrupts follow from
 * GIC_SHARED_HWIRQ_BASE upwards.
 */
#define GIC_LOCAL_HWIRQ_BASE 0
#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
0048
/* Virtual base address of the memory-mapped GIC register block. */
void __iomem *mips_gic_base;

/* Per-CPU bitmap of shared interrupts currently routed to that CPU. */
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

/* Serialises read-modify-write sequences on shared GIC register state. */
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
/* Number of shared interrupts implemented, read from GIC_CONFIG at probe. */
static int gic_shared_intrs;
/* GIC output pin used for most interrupts (relative to GIC_CPU_PIN_OFFSET). */
static unsigned int gic_cpu_pin;
/* Pin used for the local timer; may differ from gic_cpu_pin (see gic_of_init). */
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

#ifdef CONFIG_GENERIC_IRQ_IPI
/* Shared interrupts reserved for use as IPIs. */
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
/* Subset of ipi_resrv not yet allocated to an IPI. */
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
#endif

/*
 * Saved map/mask state for local interrupts driven by the "all VPEs" chip,
 * kept so configuration can be replayed on CPUs that come online later
 * (see gic_all_vpes_irq_cpu_online()).
 */
static struct gic_all_vpes_chip_data {
	u32 map;
	bool mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
0069
0070 static void gic_clear_pcpu_masks(unsigned int intr)
0071 {
0072 unsigned int i;
0073
0074
0075 for_each_possible_cpu(i)
0076 clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
0077 }
0078
0079 static bool gic_local_irq_is_routable(int intr)
0080 {
0081 u32 vpe_ctl;
0082
0083
0084 if (cpu_has_veic)
0085 return true;
0086
0087 vpe_ctl = read_gic_vl_ctl();
0088 switch (intr) {
0089 case GIC_LOCAL_INT_TIMER:
0090 return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
0091 case GIC_LOCAL_INT_PERFCTR:
0092 return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
0093 case GIC_LOCAL_INT_FDC:
0094 return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
0095 case GIC_LOCAL_INT_SWINT0:
0096 case GIC_LOCAL_INT_SWINT1:
0097 return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
0098 default:
0099 return true;
0100 }
0101 }
0102
/*
 * EIC-mode hook (installed via board_bind_eic_interrupt): select shadow
 * register set @set for the GIC pin that produces vector @irq.
 */
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert the vector number back to a GIC pin number. */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Program the per-VP shadow register set for this pin. */
	write_gic_vl_eic_shadow_set(irq, set);
}
0111
/*
 * ipi_send_single callback: raise the edge-triggered shared interrupt
 * backing @d by writing its number, with the R/W bit set, to the GIC
 * WEDGE register. The target @cpu is implicit in the interrupt's routing.
 */
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	write_gic_wedge(GIC_WEDGE_RW | hwirq);
}
0118
0119 int gic_get_c0_compare_int(void)
0120 {
0121 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
0122 return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
0123 return irq_create_mapping(gic_irq_domain,
0124 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
0125 }
0126
0127 int gic_get_c0_perfcount_int(void)
0128 {
0129 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
0130
0131 if (cp0_perfcount_irq < 0)
0132 return -1;
0133 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
0134 }
0135 return irq_create_mapping(gic_irq_domain,
0136 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
0137 }
0138
0139 int gic_get_c0_fdc_int(void)
0140 {
0141 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
0142
0143 if (cp0_fdc_irq < 0)
0144 return -1;
0145 return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
0146 }
0147
0148 return irq_create_mapping(gic_irq_domain,
0149 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
0150 }
0151
/*
 * Dispatch all pending shared interrupts that are routed to this CPU.
 * @chained: true when called from a chained flow handler, false when
 * called from the low-level vectored dispatch path.
 */
static void gic_handle_shared_int(bool chained)
{
	unsigned int intr;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Bitmap of shared interrupts routed to this CPU. */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	/* Copy the pending registers using the CM's native access width. */
	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	/* Only handle interrupts targeted at this CPU. */
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		if (chained)
			generic_handle_domain_irq(gic_irq_domain,
						  GIC_SHARED_TO_HWIRQ(intr));
		else
			do_domain_IRQ(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	}
}
0179
0180 static void gic_mask_irq(struct irq_data *d)
0181 {
0182 unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
0183
0184 write_gic_rmask(intr);
0185 gic_clear_pcpu_masks(intr);
0186 }
0187
/*
 * Unmask a shared interrupt and mark it as routed to exactly one CPU:
 * the first CPU in the interrupt's effective affinity mask.
 */
static void gic_unmask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	write_gic_smask(intr);

	/* Clear all CPUs first, then set only the effective target. */
	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}
0199
0200 static void gic_ack_irq(struct irq_data *d)
0201 {
0202 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
0203
0204 write_gic_wedge(irq);
0205 }
0206
/*
 * irq_set_type callback for shared interrupts: program the polarity,
 * trigger and dual-edge configuration matching @type, then install the
 * edge or level chip and flow handler accordingly. Unknown types default
 * to active-high level. Always returns 0.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq, pol, trig, dual;
	unsigned long flags;

	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		pol = GIC_POL_FALLING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		pol = GIC_POL_RISING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		pol = 0; /* polarity is irrelevant in dual-edge mode */
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_DUAL;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		pol = GIC_POL_ACTIVE_LOW;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		pol = GIC_POL_ACTIVE_HIGH;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	}

	change_gic_pol(irq, pol);
	change_gic_trig(irq, trig);
	change_gic_dual(irq, dual);

	/* Edge interrupts need the ack-based chip and flow handler. */
	if (trig == GIC_TRIG_EDGE)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
0258
0259 #ifdef CONFIG_SMP
/*
 * Retarget a shared interrupt at the first online CPU in @cpumask. The
 * GIC routes each shared interrupt to a single VP, so the effective
 * affinity is always reduced to exactly one CPU.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	unsigned int cpu;

	/* Fails if the requested mask contains no online CPU. */
	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	/* Assumption - lock VPE map register against other config writers. */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ to the chosen VP. */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Update the pcpu_masks, but only mark the new CPU if unmasked. */
	gic_clear_pcpu_masks(irq);
	if (read_gic_mask(irq))
		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
0287 #endif
0288
/* irq_chip for level-triggered shared interrupts (no ack needed). */
static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};
0298
/*
 * irq_chip for edge-triggered shared interrupts; also provides the IPI
 * send hook since GIC IPIs are implemented as edge-triggered shared
 * interrupts raised via the WEDGE register.
 */
static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.ipi_send_single	= gic_send_ipi,
};
0310
/*
 * Dispatch all pending-and-unmasked local interrupts on this VP.
 * @chained: true when called from a chained flow handler, false when
 * called from the low-level vectored dispatch path.
 */
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	/* Only handle interrupts that are both pending and unmasked. */
	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		if (chained)
			generic_handle_domain_irq(gic_irq_domain,
						  GIC_LOCAL_TO_HWIRQ(intr));
		else
			do_domain_IRQ(gic_irq_domain,
				      GIC_LOCAL_TO_HWIRQ(intr));
	}
}
0330
0331 static void gic_mask_local_irq(struct irq_data *d)
0332 {
0333 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
0334
0335 write_gic_vl_rmask(BIT(intr));
0336 }
0337
0338 static void gic_unmask_local_irq(struct irq_data *d)
0339 {
0340 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
0341
0342 write_gic_vl_smask(BIT(intr));
0343 }
0344
/* irq_chip for local interrupts managed per-VP via percpu_devid IRQs. */
static struct irq_chip gic_local_irq_controller = {
	.name		= "MIPS GIC Local",
	.irq_mask	= gic_mask_local_irq,
	.irq_unmask	= gic_unmask_local_irq,
};
0350
/*
 * Mask a local interrupt on every online VP, using the "other" register
 * window to reach remote VPs, and record the masked state so it is
 * replayed for CPUs that come online later.
 */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = false;

	/* gic_lock also serialises use of the shared "other" window. */
	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_rmask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
0368
/*
 * Unmask a local interrupt on every online VP, using the "other" register
 * window to reach remote VPs, and record the unmasked state so it is
 * replayed for CPUs that come online later.
 */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = true;

	/* gic_lock also serialises use of the shared "other" window. */
	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_smask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
0386
/*
 * Replay the recorded map and mask state of the "all VPEs" local
 * interrupts on the CPU that is coming online. Called from
 * gic_cpu_startup() on the new CPU itself, so the local ("vl") register
 * window targets the right VP. Keep local_intrs[] in sync with the cases
 * handled in gic_irq_domain_map().
 */
static void gic_all_vpes_irq_cpu_online(void)
{
	static const unsigned int local_intrs[] = {
		GIC_LOCAL_INT_TIMER,
		GIC_LOCAL_INT_PERFCTR,
		GIC_LOCAL_INT_FDC,
	};
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);

	for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
		unsigned int intr = local_intrs[i];
		struct gic_all_vpes_chip_data *cd;

		cd = &gic_all_vpes_chip_data[intr];
		write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
		/* Only unmask here; interrupts default to masked at reset. */
		if (cd->mask)
			write_gic_vl_smask(BIT(intr));
	}

	spin_unlock_irqrestore(&gic_lock, flags);
}
0411
/*
 * irq_chip for local interrupts (timer, perf counter, FDC) that must be
 * masked/unmasked on all VPs at once rather than per-CPU.
 */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name		= "MIPS GIC Local",
	.irq_mask	= gic_mask_local_irq_all_vpes,
	.irq_unmask	= gic_unmask_local_irq_all_vpes,
};
0417
/*
 * Low-level dispatch entry used as the EIC vector handler: handle local
 * interrupts first, then shared ones.
 */
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}
0423
/*
 * Chained flow handler hooked onto the parent CPU interrupt line; @desc
 * is unused but required by the irq_flow_handler_t signature.
 */
static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
0429
/*
 * Program the hardware routing of shared hwirq @hw: map it to the GIC
 * CPU pin used by this driver and to the VP of @cpu, and record @cpu as
 * the effective affinity. Always returns 0.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	struct irq_data *data;
	unsigned long flags;

	data = irq_get_irq_data(virq);

	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	irq_data_update_effective_affinity(data, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
0447
0448 static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
0449 const u32 *intspec, unsigned int intsize,
0450 irq_hw_number_t *out_hwirq,
0451 unsigned int *out_type)
0452 {
0453 if (intsize != 3)
0454 return -EINVAL;
0455
0456 if (intspec[0] == GIC_SHARED)
0457 *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
0458 else if (intspec[0] == GIC_LOCAL)
0459 *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
0460 else
0461 return -EINVAL;
0462 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
0463
0464 return 0;
0465 }
0466
/*
 * .map callback for the main GIC domain. Shared hwirqs get the level
 * chip and a default routing to CPU 0; local hwirqs are assigned the
 * appropriate per-CPU chip and have their pin mapping programmed on
 * every online VP.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	unsigned int intr;
	int err, cpu;
	u32 map;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
#ifdef CONFIG_GENERIC_IRQ_IPI
		/* verify it's not reserved for use as an IPI */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;
#endif

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		/* Default routing: CPU 0. */
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;

	/*
	 * If support for more per-CPU interrupts is added here, keep the
	 * local_intrs[] array in gic_all_vpes_irq_cpu_online() in sync.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		/* The timer may use a different pin (see gic_of_init). */
		map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
		fallthrough;
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * These are driven by the "all VPEs" chip, which records
		 * the map/mask state so it can be replayed on CPUs that
		 * come online later.
		 */
		cd = &gic_all_vpes_chip_data[intr];
		cd->map = map;
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    cd);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/* Program the pin mapping on every online VP. */
	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
0547
0548 static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
0549 unsigned int nr_irqs, void *arg)
0550 {
0551 struct irq_fwspec *fwspec = arg;
0552 irq_hw_number_t hwirq;
0553
0554 if (fwspec->param[0] == GIC_SHARED)
0555 hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
0556 else
0557 hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);
0558
0559 return gic_irq_domain_map(d, virq, hwirq);
0560 }
0561
/*
 * .free callback for the main GIC domain: nothing to tear down, since
 * .alloc/.map only program hardware routing and chip data that is safe
 * to leave in place. Made static for consistency with every other domain
 * op in this file and to keep it out of the kernel's global namespace.
 */
static void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
}
0566
/* Domain ops for the main (local + shared) GIC interrupt domain. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};
0573
0574 #ifdef CONFIG_GENERIC_IRQ_IPI
0575
/*
 * .xlate for the IPI domain. There is no fixed hwirq for an IPI: vectors
 * are allocated dynamically from ipi_available at alloc time, so the
 * specifier is ignored and placeholder values are returned.
 */
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/* IPIs are always rising-edge; the hwirq here is a placeholder. */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}
0590
0591 static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
0592 unsigned int nr_irqs, void *arg)
0593 {
0594 struct cpumask *ipimask = arg;
0595 irq_hw_number_t hwirq, base_hwirq;
0596 int cpu, ret, i;
0597
0598 base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
0599 if (base_hwirq == gic_shared_intrs)
0600 return -ENOMEM;
0601
0602
0603 for (i = base_hwirq; i < nr_irqs; i++) {
0604 if (!test_bit(i, ipi_available))
0605 return -EBUSY;
0606 }
0607 bitmap_clear(ipi_available, base_hwirq, nr_irqs);
0608
0609
0610 i = 0;
0611 for_each_cpu(cpu, ipimask) {
0612 hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
0613
0614 ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
0615 &gic_edge_irq_controller,
0616 NULL);
0617 if (ret)
0618 goto error;
0619
0620 ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
0621 &gic_edge_irq_controller,
0622 NULL);
0623 if (ret)
0624 goto error;
0625
0626 ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
0627 if (ret)
0628 goto error;
0629
0630 ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
0631 if (ret)
0632 goto error;
0633
0634 i++;
0635 }
0636
0637 return 0;
0638 error:
0639 bitmap_set(ipi_available, base_hwirq, nr_irqs);
0640 return ret;
0641 }
0642
/*
 * Release @nr_irqs IPI vectors back to the available pool. Assumes @virq
 * is the first of the contiguous range allocated by
 * gic_ipi_domain_alloc(), so its hwirq marks the base of the range.
 */
static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}
0656
0657 static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
0658 enum irq_domain_bus_token bus_token)
0659 {
0660 bool is_ipi;
0661
0662 switch (bus_token) {
0663 case DOMAIN_BUS_IPI:
0664 is_ipi = d->bus_token == bus_token;
0665 return (!node || to_of_node(d->fwnode) == node) && is_ipi;
0666 break;
0667 default:
0668 return 0;
0669 }
0670 }
0671
/* Domain ops for the hierarchical IPI domain layered over the main one. */
static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};
0678
/*
 * Create the hierarchical IPI domain and populate the ipi_resrv bitmap:
 * either from the devicetree "mti,reserved-ipi-vectors" property, or by
 * reserving the last 2 * num_possible_cpus() shared vectors. Returns 0
 * on success, -ENXIO if the domain cannot be created.
 */
static int gic_register_ipi_domain(struct device_node *node)
{
	struct irq_domain *gic_ipi_domain;
	unsigned int v[2], num_ipis;

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add IPI domain");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		/* Devicetree specifies <base count> of reserved vectors. */
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/*
		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
		 * taken from the end of the shared interrupt range.
		 */
		num_ipis = 2 * num_possible_cpus();
		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
	}

	/* Initially every reserved vector is available for allocation. */
	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	return 0;
}
0711
0712 #else
0713
/* No-op stub when the generic IPI infrastructure is not configured. */
static inline int gic_register_ipi_domain(struct device_node *node)
{
	return 0;
}
0718
0719 #endif
0720
/*
 * CPU hotplug "starting" callback, run on the CPU coming online: set EIC
 * mode to match the boot configuration, mask all local interrupts, and
 * replay the "all VPEs" interrupt configuration. Always returns 0.
 */
static int gic_cpu_startup(unsigned int cpu)
{
	/* Enable or disable EIC to match the rest of the system. */
	change_gic_vl_ctl(GIC_VX_CTL_EIC,
			  cpu_has_veic ? GIC_VX_CTL_EIC : 0);

	/* Clear all local IRQ masks (ie. disable all local interrupts). */
	write_gic_vl_rmask(~0);

	/* Enable desired interrupts recorded by the all-VPEs chip. */
	gic_all_vpes_irq_cpu_online();

	return 0;
}
0735
/*
 * Probe and initialise the MIPS GIC from its devicetree node: pick a CPU
 * vector, locate and map the register block, size the shared interrupt
 * space, set up dispatch, register the IRQ and IPI domains, program sane
 * defaults, and hook CPU hotplug. Returns 0 on success or a negative
 * errno.
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, gicconfig;
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;
	int ret;

	/* Find the first available CPU vector (SW0/SW1 are never usable). */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * No "reg" in the devicetree: fall back to the base address
		 * already programmed into the Coherence Manager's GCR.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
			pr_warn("Using inherited base address %pa\n",
				&gic_base);
		} else {
			pr_err("Failed to get memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure the GIC region is enabled before accessing it. */
		__sync();
	}

	mips_gic_base = ioremap(gic_base, gic_len);
	if (!mips_gic_base) {
		pr_err("Failed to ioremap gic_base\n");
		return -ENOMEM;
	}

	/* NUMINTERRUPTS encodes (number of shared interrupts / 8) - 1. */
	gicconfig = read_gic_config();
	gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;

	if (cpu_has_veic) {
		/* EIC mode: everything dispatches through one vector. */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With CONFIG_MIPS_CMP, secondary CPUs may be parked by the
		 * bootloader in a timer-based wait loop, so the timer
		 * interrupt must not be re-routed to the pin chosen above.
		 * Instead, keep the pin the bootloader routed the timer to
		 * and chain our dispatcher off that pin as well.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add IRQ domain");
		return -ENXIO;
	}

	ret = gic_register_ipi_domain(node);
	if (ret)
		return ret;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults: all shared interrupts level-triggered and masked. */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(i);
	}

	return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
				 "irqchip/mips/gic:starting",
				 gic_cpu_startup, NULL);
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);