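// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright The Asahi Linux Contributors
 *
 * Driver for the Apple Interrupt Controller (AIC) found on Apple silicon
 * SoCs, plus the FIQ sources (timers, PMCs, fast IPIs) that bypass it.
 *
 * AIC v1 (e.g. t8103/M1) is a fairly simple interrupt controller:
 *  - Level-triggered hardware IRQs, with per-IRQ mask/unmask, software
 *    trigger, and CPU affinity registers
 *  - A single mirrored event register that reports and acks both IRQs
 *    and hardware IPIs
 *  - 32 virtual IPIs (vIPIs) multiplexed in software onto the "self" and
 *    "other" hardware IPIs
 *
 * AIC v2 drops the hardware IPI and affinity registers (fast IPIs via
 * IMP-DEF system registers are always used instead) and adds support for
 * multiple dies.
 *
 * FIQs are not routed through AIC at all: the architected and guest
 * timers, core and uncore PMCs, and fast IPIs arrive directly as FIQs
 * and are demultiplexed in aic_handle_fiq().
 */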

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-vgic-info.h>
#include <linux/irqdomain.h>
#include <linux/jump_label.h>
#include <linux/limits.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/apple_m1_pmu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <dt-bindings/interrupt-controller/apple-aic.h>
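
/*
 * AIC v1 registers (MMIO)
 */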
#define AIC_INFO 0x0004
#define AIC_INFO_NR_IRQ GENMASK(15, 0)

#define AIC_CONFIG 0x0010

#define AIC_WHOAMI 0x2000
#define AIC_EVENT 0x2004
#define AIC_EVENT_DIE GENMASK(31, 24)
#define AIC_EVENT_TYPE GENMASK(23, 16)
#define AIC_EVENT_NUM GENMASK(15, 0)

#define AIC_EVENT_TYPE_FIQ 0
#define AIC_EVENT_TYPE_IRQ 1
#define AIC_EVENT_TYPE_IPI 4
#define AIC_EVENT_IPI_OTHER 1
#define AIC_EVENT_IPI_SELF 2

#define AIC_IPI_SEND 0x2008
#define AIC_IPI_ACK 0x200c
#define AIC_IPI_MASK_SET 0x2024
#define AIC_IPI_MASK_CLR 0x2028

#define AIC_IPI_SEND_CPU(cpu) BIT(cpu)

#define AIC_IPI_OTHER BIT(0)
#define AIC_IPI_SELF BIT(31)

#define AIC_TARGET_CPU 0x3000

#define AIC_CPU_IPI_SET(cpu) (0x5008 + ((cpu) << 7))
#define AIC_CPU_IPI_CLR(cpu) (0x500c + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))

#define AIC_MAX_IRQ 0x400
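
/*
 * AIC v2 registers (MMIO)
 */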
#define AIC2_VERSION 0x0000
#define AIC2_VERSION_VER GENMASK(7, 0)

#define AIC2_INFO1 0x0004
#define AIC2_INFO1_NR_IRQ GENMASK(15, 0)
#define AIC2_INFO1_LAST_DIE GENMASK(27, 24)

#define AIC2_INFO2 0x0008

#define AIC2_INFO3 0x000c
#define AIC2_INFO3_MAX_IRQ GENMASK(15, 0)
#define AIC2_INFO3_MAX_DIE GENMASK(27, 24)

#define AIC2_RESET 0x0010
#define AIC2_RESET_RESET BIT(0)

#define AIC2_CONFIG 0x0014
#define AIC2_CONFIG_ENABLE BIT(0)
#define AIC2_CONFIG_PREFER_PCPU BIT(28)

#define AIC2_TIMEOUT 0x0028
#define AIC2_CLUSTER_PRIO 0x0030
#define AIC2_DELAY_GROUPS 0x0100

#define AIC2_IRQ_CFG 0x2000
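
/*
 * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
 *
 * Repeat for each die:
 *   IRQ_CFG:  u32 * MAX_IRQS
 *   SW_SET:   u32 * (MAX_IRQS / 32)
 *   SW_CLR:   u32 * (MAX_IRQS / 32)
 *   MASK_SET: u32 * (MAX_IRQS / 32)
 *   MASK_CLR: u32 * (MAX_IRQS / 32)
 *   HW_STATE: u32 * (MAX_IRQS / 32)
 *
 * This is followed by a set of event registers, each 16K page aligned.
 * The first one is the AP event register we will use. Unfortunately,
 * the actual implemented die count is not specified anywhere in the
 * capability registers, so we have to explicitly specify the event
 * register as a second reg entry in the device tree to remain
 * forward-compatible.
 */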
#define AIC2_IRQ_CFG_TARGET GENMASK(3, 0)
#define AIC2_IRQ_CFG_DELAY_IDX GENMASK(7, 5)

/* Mask bits live in arrays of 32-bit registers: e.g. irq 37 -> reg offset 4, bit 5 */
#define MASK_REG(x) (4 * ((x) >> 5))
#define MASK_BIT(x) BIT((x) & GENMASK(4, 0))
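
/*
 * IMP-DEF sysregs that control FIQ sources
 */

/* IPI request registers */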
#define SYS_IMP_APL_IPI_RR_LOCAL_EL1 sys_reg(3, 5, 15, 0, 0)
#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1 sys_reg(3, 5, 15, 0, 1)
#define IPI_RR_CPU GENMASK(7, 0)
/* Cluster only used for the GLOBAL register */
#define IPI_RR_CLUSTER GENMASK(23, 16)
#define IPI_RR_TYPE GENMASK(29, 28)
#define IPI_RR_IMMEDIATE 0
#define IPI_RR_RETRACT 1
#define IPI_RR_DEFERRED 2
#define IPI_RR_NOWAKE 3

/* IPI status register */
#define SYS_IMP_APL_IPI_SR_EL1 sys_reg(3, 5, 15, 1, 1)
#define IPI_SR_PENDING BIT(0)

/* Guest timer FIQ enable register */
#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2 sys_reg(3, 5, 15, 1, 3)
#define VM_TMR_FIQ_ENABLE_V BIT(0)
#define VM_TMR_FIQ_ENABLE_P BIT(1)

/* Deferred IPI countdown register */
#define SYS_IMP_APL_IPI_CR_EL1 sys_reg(3, 5, 15, 3, 1)

/* Uncore PMC control register */
#define SYS_IMP_APL_UPMCR0_EL1 sys_reg(3, 7, 15, 0, 4)
#define UPMCR0_IMODE GENMASK(18, 16)
#define UPMCR0_IMODE_OFF 0
#define UPMCR0_IMODE_AIC 2
#define UPMCR0_IMODE_HALT 3
#define UPMCR0_IMODE_FIQ 4

/* Uncore PMC status register */
#define SYS_IMP_APL_UPMSR_EL1 sys_reg(3, 7, 15, 6, 4)
#define UPMSR_IACT BIT(0)

#define MPIDR_CPU(x) MPIDR_AFFINITY_LEVEL(x, 0)
#define MPIDR_CLUSTER(x) MPIDR_AFFINITY_LEVEL(x, 1)

#define AIC_IRQ_HWIRQ(die, irq) (FIELD_PREP(AIC_EVENT_DIE, die) | \
				 FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \
				 FIELD_PREP(AIC_EVENT_NUM, irq))
#define AIC_FIQ_HWIRQ(x) (FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \
			  FIELD_PREP(AIC_EVENT_NUM, x))
#define AIC_HWIRQ_IRQ(x) FIELD_GET(AIC_EVENT_NUM, x)
#define AIC_HWIRQ_DIE(x) FIELD_GET(AIC_EVENT_DIE, x)
#define AIC_NR_FIQ 6
#define AIC_NR_SWIPI 32
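
/*
 * FIQ hwirq index definitions: FIQ sources use the DT binding defines
 * directly, except that timers are special. The DT's HV and guest timer
 * FIQs are remapped onto the EL0/EL02 views below; when the kernel runs
 * at EL1 (under a hypervisor), the guest timers become the EL0 (native)
 * timers and the HV timers are unavailable (see
 * aic_irq_domain_translate()).
 */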
#define AIC_TMR_EL0_PHYS AIC_TMR_HV_PHYS
#define AIC_TMR_EL0_VIRT AIC_TMR_HV_VIRT
#define AIC_TMR_EL02_PHYS AIC_TMR_GUEST_PHYS
#define AIC_TMR_EL02_VIRT AIC_TMR_GUEST_VIRT

static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);

struct aic_info {
	int version;

	/* Register offsets */
	u32 event;
	u32 target_cpu;
	u32 irq_cfg;
	u32 sw_set;
	u32 sw_clr;
	u32 mask_set;
	u32 mask_clr;

	u32 die_stride;

	/* Features */
	bool fast_ipi;
};

static const struct aic_info aic1_info = {
	.version = 1,

	.event = AIC_EVENT,
	.target_cpu = AIC_TARGET_CPU,
};

static const struct aic_info aic1_fipi_info = {
	.version = 1,

	.event = AIC_EVENT,
	.target_cpu = AIC_TARGET_CPU,

	.fast_ipi = true,
};

static const struct aic_info aic2_info = {
	.version = 2,

	.irq_cfg = AIC2_IRQ_CFG,

	.fast_ipi = true,
};

static const struct of_device_id aic_info_match[] = {
	{
		.compatible = "apple,t8103-aic",
		.data = &aic1_fipi_info,
	},
	{
		.compatible = "apple,aic",
		.data = &aic1_info,
	},
	{
		.compatible = "apple,aic2",
		.data = &aic2_info,
	},
	{}
};

struct aic_irq_chip {
	void __iomem *base;
	void __iomem *event;
	struct irq_domain *hw_domain;
	struct irq_domain *ipi_domain;
	struct {
		cpumask_t aff;
	} *fiq_aff[AIC_NR_FIQ];

	int nr_irq;
	int max_irq;
	int nr_die;
	int max_die;

	struct aic_info info;
};

static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);

static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);

static struct aic_irq_chip *aic_irqc;

static void aic_handle_ipi(struct pt_regs *regs);

static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg)
{
	return readl_relaxed(ic->base + reg);
}

static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
{
	writel_relaxed(val, ic->base + reg);
}
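
/*
 * IRQ irqchip
 */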

static void aic_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
	u32 irq = AIC_HWIRQ_IRQ(hwirq);

	aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq));
}

static void aic_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
	u32 irq = AIC_HWIRQ_IRQ(hwirq);

	aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
}

static void aic_irq_eoi(struct irq_data *d)
{
	/*
	 * Reading the interrupt reason automatically acknowledges and masks
	 * the IRQ, so we just unmask it here if needed.
	 */
	if (!irqd_irq_masked(d))
		aic_irq_unmask(d);
}

static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
{
	struct aic_irq_chip *ic = aic_irqc;
	u32 event, type, irq;

	do {
		/*
		 * We cannot use a relaxed read here, as reads from DMA buffers
		 * need to be ordered after the IRQ fires.
		 */
		event = readl(ic->event + ic->info.event);
		type = FIELD_GET(AIC_EVENT_TYPE, event);
		irq = FIELD_GET(AIC_EVENT_NUM, event);

		if (type == AIC_EVENT_TYPE_IRQ)
			generic_handle_domain_irq(aic_irqc->hw_domain, event);
		else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
			aic_handle_ipi(regs);
		else if (event != 0)
			pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq);
	} while (event);

	/*
	 * vGIC maintenance interrupts end up here too, so we need to check
	 * for them separately. This should never trigger if KVM is working
	 * properly, because it will have already taken care of clearing it
	 * on guest exit before this handler runs.
	 */
	if (is_kernel_in_hyp_mode() && (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
	    read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
		pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
	}
}

static int aic_irq_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val, bool force)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	int cpu;

	BUG_ON(!ic->info.target_cpu);

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static int aic_irq_set_type(struct irq_data *d, unsigned int type)
{
	/*
	 * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't
	 * have a way to find out the type of any given IRQ, so just allow both.
	 */
	return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
}

static struct irq_chip aic_chip = {
	.name = "AIC",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_affinity = aic_irq_set_affinity,
	.irq_set_type = aic_irq_set_type,
};

static struct irq_chip aic2_chip = {
	.name = "AIC2",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_type = aic_irq_set_type,
};
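
/*
 * FIQ irqchip
 */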

static unsigned long aic_fiq_get_idx(struct irq_data *d)
{
	return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
}

static void aic_fiq_set_mask(struct irq_data *d)
{
	/* Only the guest timers have real mask bits, unfortunately. */
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_clear_mask(struct irq_data *d)
{
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_mask(struct irq_data *d)
{
	aic_fiq_set_mask(d);
	__this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_unmask(struct irq_data *d)
{
	aic_fiq_clear_mask(d);
	__this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_eoi(struct irq_data *d)
{
	/* We mask to ack (where we can), so we need to unmask at EOI. */
	if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
		aic_fiq_clear_mask(d);
}
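
/* A timer is firing if it is enabled, unmasked, and its status bit is set */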
#define TIMER_FIRING(x)							\
	(((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK |	\
		 ARCH_TIMER_CTRL_IT_STAT)) ==				\
	 (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))

static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
{
	/*
	 * It would be really nice if we had a system register that lets us get
	 * the FIQ source state without having to peek down into sources...
	 * but such a register does not seem to exist.
	 *
	 * So, we have these potential sources to test for:
	 *  - Fast IPIs (only if supported)
	 *  - The 4 timers (CNTP, CNTV for each of HV and guest)
	 *  - Per-core PMCs
	 *  - Per-cluster uncore PMCs (not yet supported)
	 *
	 * Since not dealing with any of these results in a FIQ storm,
	 * we check for everything here, even things we don't support yet.
	 */
	if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
		if (static_branch_likely(&use_fast_ipi)) {
			aic_handle_ipi(regs);
		} else {
			pr_err_ratelimited("Fast IPI fired. Acking.\n");
			write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
		}
	}

	if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));

	if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));

	if (is_kernel_in_hyp_mode()) {
		uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);

		if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));

		if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
	}

	if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
		int irq;

		if (cpumask_test_cpu(smp_processor_id(),
				     &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
			irq = AIC_CPU_PMU_P;
		else
			irq = AIC_CPU_PMU_E;
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(irq));
	}

	if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
	    (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
		/* Same story with uncore PMCs */
		pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
		sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
				   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
	}
}

static int aic_fiq_set_type(struct irq_data *d, unsigned int type)
{
	return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
}

static struct irq_chip fiq_chip = {
	.name = "AIC-FIQ",
	.irq_mask = aic_fiq_mask,
	.irq_unmask = aic_fiq_unmask,
	.irq_ack = aic_fiq_set_mask,
	.irq_eoi = aic_fiq_eoi,
	.irq_set_type = aic_fiq_set_type,
};
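
/*
 * Main IRQ domain
 */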

static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct aic_irq_chip *ic = id->host_data;
	u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
	struct irq_chip *chip = &aic_chip;

	if (ic->info.version == 2)
		chip = &aic2_chip;

	if (type == AIC_EVENT_TYPE_IRQ) {
		irq_domain_set_info(id, irq, hw, chip, id->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	} else {
		int fiq = FIELD_GET(AIC_EVENT_NUM, hw);

		switch (fiq) {
		case AIC_CPU_PMU_P:
		case AIC_CPU_PMU_E:
			irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
			break;
		default:
			irq_set_percpu_devid(irq);
			break;
		}

		irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static int aic_irq_domain_translate(struct irq_domain *id,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	struct aic_irq_chip *ic = id->host_data;
	u32 *args;
	u32 die = 0;

	if (fwspec->param_count < 3 || fwspec->param_count > 4 ||
	    !is_of_node(fwspec->fwnode))
		return -EINVAL;

	args = &fwspec->param[1];

	if (fwspec->param_count == 4) {
		die = args[0];
		args++;
	}

	switch (fwspec->param[0]) {
	case AIC_IRQ:
		if (die >= ic->nr_die)
			return -EINVAL;
		if (args[0] >= ic->nr_irq)
			return -EINVAL;
		*hwirq = AIC_IRQ_HWIRQ(die, args[0]);
		break;
	case AIC_FIQ:
		if (die != 0)
			return -EINVAL;
		if (args[0] >= AIC_NR_FIQ)
			return -EINVAL;
		*hwirq = AIC_FIQ_HWIRQ(args[0]);

		/*
		 * In EL1 the non-redirected registers are the guest's,
		 * not EL2's, so remap the hwirqs to match.
		 */
		if (!is_kernel_in_hyp_mode()) {
			switch (args[0]) {
			case AIC_TMR_GUEST_PHYS:
				*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS);
				break;
			case AIC_TMR_GUEST_VIRT:
				*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT);
				break;
			case AIC_TMR_HV_PHYS:
			case AIC_TMR_HV_VIRT:
				return -ENOENT;
			default:
				break;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	*type = args[1] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	int i, ret;

	ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = aic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops aic_irq_domain_ops = {
	.translate = aic_irq_domain_translate,
	.alloc = aic_irq_domain_alloc,
	.free = aic_irq_domain_free,
};
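
/*
 * IPI irqchip
 */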

static void aic_ipi_send_fast(int cpu)
{
	u64 mpidr = cpu_logical_map(cpu);
	u64 my_mpidr = read_cpuid_mpidr();
	u64 cluster = MPIDR_CLUSTER(mpidr);
	u64 idx = MPIDR_CPU(mpidr);

	if (MPIDR_CLUSTER(my_mpidr) == cluster)
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
			       SYS_IMP_APL_IPI_RR_LOCAL_EL1);
	else
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
			       SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
	isb();
}

static void aic_ipi_mask(struct irq_data *d)
{
	u32 irq_bit = BIT(irqd_to_hwirq(d));

	/* No specific ordering requirements needed here. */
	atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
}

static void aic_ipi_unmask(struct irq_data *d)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 irq_bit = BIT(irqd_to_hwirq(d));

	atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));

	/*
	 * The atomic_or() above must complete before the atomic_read()
	 * below to avoid racing aic_ipi_send_mask().
	 */
	smp_mb__after_atomic();

	/*
	 * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
	 * No barriers needed here since this is a self-IPI.
	 */
	if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
		if (static_branch_likely(&use_fast_ipi))
			aic_ipi_send_fast(smp_processor_id());
		else
			aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
	}
}

static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 irq_bit = BIT(irqd_to_hwirq(d));
	u32 send = 0;
	int cpu;
	unsigned long pending;

	for_each_cpu(cpu, mask) {
		/*
		 * This sequence is the mirror of the one in aic_ipi_unmask();
		 * see the comment there. Additionally, release semantics
		 * ensure that the vIPI flag set is ordered after any shared
		 * memory accesses that precede it.
		 */
		pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));

		/*
		 * The atomic_fetch_or_release() above must complete before the
		 * atomic_read() below to avoid racing aic_ipi_unmask().
		 */
		smp_mb__after_atomic();

		if (!(pending & irq_bit) &&
		    (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
			if (static_branch_likely(&use_fast_ipi))
				aic_ipi_send_fast(cpu);
			else
				send |= AIC_IPI_SEND_CPU(cpu);
		}
	}

	/*
	 * The flag writes must complete before the physical IPI is issued
	 * to another CPU. This is implied by the control dependency on
	 * the result of atomic_read() above, which is itself already
	 * ordered after the vIPI flag write by the barrier.
	 */
	if (send)
		aic_ic_write(ic, AIC_IPI_SEND, send);
}

static struct irq_chip ipi_chip = {
	.name = "AIC-IPI",
	.irq_mask = aic_ipi_mask,
	.irq_unmask = aic_ipi_unmask,
	.ipi_send_mask = aic_ipi_send_mask,
};
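
/*
 * Handle an incoming hardware IPI (AIC event or fast-IPI FIQ): ack it,
 * then fire any pending, enabled vIPIs as interrupts in the IPI domain.
 */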
static void aic_handle_ipi(struct pt_regs *regs)
{
	int i;
	unsigned long enabled, firing;

	/*
	 * Ack the IPI. We need to order this after the AIC event read, but
	 * that is enforced by normal MMIO ordering guarantees.
	 *
	 * For the fast IPI case, this needs to be ordered before the vIPI
	 * handling below, so we need an isb().
	 */
	if (static_branch_likely(&use_fast_ipi)) {
		write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
		isb();
	} else {
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
	}

	/*
	 * The mask read does not need to be ordered. Only we can change
	 * our own mask anyway, so no races are possible here, as long as
	 * we are properly in the interrupt handler (which is covered by
	 * the barrier that is part of the top-level AIC handler's readl()).
	 */
	enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));

	/*
	 * Clear the IPIs we are about to handle. This pairs with the
	 * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
	 * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
	 * before IPI handling code (to avoid races handling vIPIs before they
	 * are signaled). The former is taken care of by the release semantics
	 * of the write portion, while the latter is taken care of by the
	 * acquire semantics of the read portion.
	 */
	firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;

	for_each_set_bit(i, &firing, AIC_NR_SWIPI)
		generic_handle_domain_irq(aic_irqc->ipi_domain, i);

	/*
	 * No ordering needed here; at worst this just changes the timing of
	 * when the next IPI will be delivered.
	 */
	if (!static_branch_likely(&use_fast_ipi))
		aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}

static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs, void *args)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_set_percpu_devid(virq + i);
		irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
{
	/* Not freeing IPIs */
}

static const struct irq_domain_ops aic_ipi_domain_ops = {
	.alloc = aic_ipi_alloc,
	.free = aic_ipi_free,
};

static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
	struct irq_domain *ipi_domain;
	int base_ipi;

	ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
					      &aic_ipi_domain_ops, irqc);
	if (WARN_ON(!ipi_domain))
		return -ENODEV;

	ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);

	base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, AIC_NR_SWIPI,
					   NUMA_NO_NODE, NULL, false, NULL);

	if (WARN_ON(base_ipi <= 0)) {
		irq_domain_remove(ipi_domain);
		return -ENODEV;
	}

	set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);

	irqc->ipi_domain = ipi_domain;

	return 0;
}

static int aic_init_cpu(unsigned int cpu)
{
	/* Mask all hard-wired per-CPU IRQ/FIQ sources */

	/* Pending Fast IPI FIQs */
	write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);

	/* Timer FIQs */
	sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
	sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);

	/* EL2-only (VHE mode) IRQ sources */
	if (is_kernel_in_hyp_mode()) {
		/* Guest timers */
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,
				   VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);

		/* vGIC maintenance IRQ */
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
	}

	/* PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
			   FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));

	/* Uncore PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
			   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));

	/* Commit all of the above */
	isb();

	if (aic_irqc->info.version == 1) {
		/*
		 * Make sure the kernel's idea of logical CPU order is the same
		 * as AIC's. If we ever end up with a mismatch here, we will
		 * need to map cores to logical CPUs or something.
		 */
		WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());

		/*
		 * Always keep IPIs unmasked at the hardware level (except for
		 * AIC's own auto-masking during IPI processing); we manage
		 * masks at the vIPI level. With fast IPIs, the AIC IPIs are
		 * unused entirely, so mask them both.
		 */
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
		if (static_branch_likely(&use_fast_ipi)) {
			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
		} else {
			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
			aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
		}
	}

	/* Initialize the local mask state */
	__this_cpu_write(aic_fiq_unmasked, 0);

	return 0;
}

static struct gic_kvm_info vgic_info __initdata = {
	.type = GIC_V3,
	.no_maint_irq_mask = true,
	.no_hw_deactivation = true,
};
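
/*
 * Build the CPU affinity mask for a FIQ source (e.g. the per-core PMUs)
 * from an "affinities" child node in the device tree.
 */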
static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
{
	int i, n;
	u32 fiq;

	if (of_property_read_u32(aff, "apple,fiq-index", &fiq) ||
	    WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq])
		return;

	n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32));
	if (WARN_ON(n < 0))
		return;

	ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL);
	if (!ic->fiq_aff[fiq])
		return;

	for (i = 0; i < n; i++) {
		struct device_node *cpu_node;
		u32 cpu_phandle;
		int cpu;

		if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle))
			continue;

		cpu_node = of_find_node_by_phandle(cpu_phandle);
		if (WARN_ON(!cpu_node))
			continue;

		cpu = of_cpu_node_to_id(cpu_node);
		of_node_put(cpu_node);
		if (WARN_ON(cpu < 0))
			continue;

		cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
	}
}

static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
	int i, die;
	u32 off, start_off;
	void __iomem *regs;
	struct aic_irq_chip *irqc;
	struct device_node *affs;
	const struct of_device_id *match;

	regs = of_iomap(node, 0);
	if (WARN_ON(!regs))
		return -EIO;

	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
	if (!irqc) {
		iounmap(regs);
		return -ENOMEM;
	}

	irqc->base = regs;

	match = of_match_node(aic_info_match, node);
	if (!match)
		goto err_unmap;

	irqc->info = *(struct aic_info *)match->data;

	aic_irqc = irqc;

	switch (irqc->info.version) {
	case 1: {
		u32 info;

		info = aic_ic_read(irqc, AIC_INFO);
		irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info);
		irqc->max_irq = AIC_MAX_IRQ;
		irqc->nr_die = irqc->max_die = 1;

		off = start_off = irqc->info.target_cpu;
		off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */

		irqc->event = irqc->base;

		break;
	}
	case 2: {
		u32 info1, info3;

		info1 = aic_ic_read(irqc, AIC2_INFO1);
		info3 = aic_ic_read(irqc, AIC2_INFO3);

		irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
		irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
		irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
		irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);

		off = start_off = irqc->info.irq_cfg;
		off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */

		irqc->event = of_iomap(node, 1);
		if (WARN_ON(!irqc->event))
			goto err_unmap;

		break;
	}
	}

	irqc->info.sw_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */
	irqc->info.sw_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */
	irqc->info.mask_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */
	irqc->info.mask_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
	off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */

	if (irqc->info.fast_ipi)
		static_branch_enable(&use_fast_ipi);
	else
		static_branch_disable(&use_fast_ipi);

	irqc->info.die_stride = off - start_off;

	irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
						 &aic_irq_domain_ops, irqc);
	if (WARN_ON(!irqc->hw_domain))
		goto err_unmap;

	irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);

	if (aic_init_smp(irqc, node))
		goto err_remove_domain;

	affs = of_get_child_by_name(node, "affinities");
	if (affs) {
		struct device_node *chld;

		for_each_child_of_node(affs, chld)
			build_fiq_affinity(irqc, chld);
	}
	of_node_put(affs);

	set_handle_irq(aic_handle_irq);
	set_handle_fiq(aic_handle_fiq);

	/* Mask all IRQs, clear pending SW triggers, and target CPU 0 on every die */
	off = 0;
	for (die = 0; die < irqc->nr_die; die++) {
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX);
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX);
		if (irqc->info.target_cpu)
			for (i = 0; i < irqc->nr_irq; i++)
				aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
		off += irqc->info.die_stride;
	}

	if (irqc->info.version == 2) {
		u32 config = aic_ic_read(irqc, AIC2_CONFIG);

		config |= AIC2_CONFIG_ENABLE;
		aic_ic_write(irqc, AIC2_CONFIG, config);
	}

	if (!is_kernel_in_hyp_mode())
		pr_info("Kernel running in EL1, mapping interrupts\n");

	if (static_branch_likely(&use_fast_ipi))
		pr_info("Using Fast IPIs\n");

	cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
			  "irqchip/apple-aic/ipi:starting",
			  aic_init_cpu, NULL);

	vgic_set_kvm_info(&vgic_info);

	pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs\n",
		irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI);

	return 0;

err_remove_domain:
	irq_domain_remove(irqc->hw_domain);
err_unmap:
	if (irqc->event && irqc->event != irqc->base)
		iounmap(irqc->event);
	iounmap(irqc->base);
	kfree(irqc);
	return -ENODEV;
}

IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);