0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020 #include <linux/init.h>
0021 #include <linux/kernel.h>
0022 #include <linux/err.h>
0023 #include <linux/module.h>
0024 #include <linux/list.h>
0025 #include <linux/smp.h>
0026 #include <linux/cpu.h>
0027 #include <linux/cpu_pm.h>
0028 #include <linux/cpumask.h>
0029 #include <linux/io.h>
0030 #include <linux/of.h>
0031 #include <linux/of_address.h>
0032 #include <linux/of_irq.h>
0033 #include <linux/acpi.h>
0034 #include <linux/irqdomain.h>
0035 #include <linux/interrupt.h>
0036 #include <linux/percpu.h>
0037 #include <linux/seq_file.h>
0038 #include <linux/slab.h>
0039 #include <linux/irqchip.h>
0040 #include <linux/irqchip/chained_irq.h>
0041 #include <linux/irqchip/arm-gic.h>
0042
0043 #include <asm/cputype.h>
0044 #include <asm/irq.h>
0045 #include <asm/exception.h>
0046 #include <asm/smp_plat.h>
0047 #include <asm/virt.h>
0048
0049 #include "irq-gic-common.h"
0050
0051 #ifdef CONFIG_ARM64
0052 #include <asm/cpufeature.h>
0053
/*
 * Warn (and taint the kernel once) if this CPU advertises the GICv3
 * system-register CPU interface: running the GICv2 MMIO driver on such a
 * CPU means the firmware left the system registers enabled.
 */
static void gic_check_cpu_features(void)
{
	WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
			TAINT_CPU_OUT_OF_SPEC,
			"GICv3 system registers enabled, broken firmware!\n");
}
0060 #else
0061 #define gic_check_cpu_features() do { } while(0)
0062 #endif
0063
/*
 * A GIC register region is either one mapping shared by all CPUs
 * (common_base) or, on non-banked ("franken") systems, a distinct
 * per-CPU mapping (percpu_base). Which member is valid is decided at
 * init time (see gic_init_bases() and __get_base()).
 */
union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};
0068
/* Per-instance state for one GIC (up to CONFIG_ARM_GIC_MAX_NR of them). */
struct gic_chip_data {
	union gic_base dist_base;	/* distributor registers */
	union gic_base cpu_base;	/* CPU interface registers */
	void __iomem *raw_dist_base;	/* unadjusted mapping, for teardown */
	void __iomem *raw_cpu_base;	/* unadjusted mapping, for teardown */
	u32 percpu_offset;		/* per-CPU register stride (non-banked GICs) */
#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
	/* Register snapshots taken around low-power states (gic_dist_save() etc.). */
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_active;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;		/* number of implemented interrupt lines */
};
0087
#ifdef CONFIG_BL_SWITCHER

/*
 * The big.LITTLE switcher (gic_migrate_target()) rewrites gic_cpu_map and
 * SGI routing at runtime, so those accesses must be serialised. Without
 * the switcher the map is only written during CPU bringup and the lock
 * compiles away (see the #else branch).
 */
static DEFINE_RAW_SPINLOCK(cpu_map_lock);

#define gic_lock_irqsave(f)		\
	raw_spin_lock_irqsave(&cpu_map_lock, (f))
#define gic_unlock_irqrestore(f)	\
	raw_spin_unlock_irqrestore(&cpu_map_lock, (f))

#define gic_lock()			raw_spin_lock(&cpu_map_lock)
#define gic_unlock()			raw_spin_unlock(&cpu_map_lock)
0099
0100 #else
0101
0102 #define gic_lock_irqsave(f) do { (void)(f); } while(0)
0103 #define gic_unlock_irqrestore(f) do { (void)(f); } while(0)
0104
0105 #define gic_lock() do { } while(0)
0106 #define gic_unlock() do { } while(0)
0107
0108 #endif
0109
/*
 * Set when the distributor cannot take byte writes to GICD_ITARGETSR and
 * byte updates must be emulated with a locked 32-bit read-modify-write
 * (see rmw_writeb()).
 */
static DEFINE_STATIC_KEY_FALSE(needs_rmw_access);

/*
 * The GIC mapping of CPU interfaces does not necessarily match the
 * logical CPU numbering; gic_cpu_map[] records, per logical CPU, the
 * interface bit the GIC itself reports (see gic_get_cpumask()).
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

/*
 * True when using split EOI/Deactivate (EOImode==1); cleared in
 * gic_init() for legacy setups. Selects gic_chip_mode1 and the
 * GICC_DIR-based completion path.
 */
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;

static struct gic_kvm_info gic_v2_kvm_info __initdata;

/* Per-CPU stash of the full GICC_IAR value of an in-progress SGI. */
static DEFINE_PER_CPU(u32, sgi_intid);
0127
0128 #ifdef CONFIG_GIC_NON_BANKED
0129 static DEFINE_STATIC_KEY_FALSE(frankengic_key);
0130
/* Switch __get_base() to the per-CPU (non-banked) register mappings. */
static void enable_frankengic(void)
{
	static_branch_enable(&frankengic_key);
}
0135
/*
 * Resolve a union gic_base to the mapping for the current CPU: the
 * per-CPU pointer on non-banked systems, the shared one otherwise.
 */
static inline void __iomem *__get_base(union gic_base *base)
{
	if (static_branch_unlikely(&frankengic_key))
		return raw_cpu_read(*base->percpu_base);

	return base->common_base;
}
0143
0144 #define gic_data_dist_base(d) __get_base(&(d)->dist_base)
0145 #define gic_data_cpu_base(d) __get_base(&(d)->cpu_base)
0146 #else
0147 #define gic_data_dist_base(d) ((d)->dist_base.common_base)
0148 #define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
0149 #define enable_frankengic() do { } while(0)
0150 #endif
0151
0152 static inline void __iomem *gic_dist_base(struct irq_data *d)
0153 {
0154 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
0155 return gic_data_dist_base(gic_data);
0156 }
0157
0158 static inline void __iomem *gic_cpu_base(struct irq_data *d)
0159 {
0160 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
0161 return gic_data_cpu_base(gic_data);
0162 }
0163
/* Hardware interrupt number (INTID) backing this irq_data. */
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
0168
0169 static inline bool cascading_gic_irq(struct irq_data *d)
0170 {
0171 void *data = irq_data_get_irq_handler_data(d);
0172
0173
0174
0175
0176
0177 return data != NULL;
0178 }
0179
0180
0181
0182
/*
 * Write this interrupt's bit into one of the distributor's 1-bit-per-IRQ
 * register banks (offset selects e.g. enable/pending/active SET or CLEAR).
 * Each 32-bit register covers 32 interrupts.
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}
0188
/*
 * Read this interrupt's bit from one of the distributor's 1-bit-per-IRQ
 * register banks; returns 0 or 1.
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
}
0194
/* Stop the distributor from forwarding this interrupt. */
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}
0199
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking an interrupt forwarded to a vcpu, the vcpu - not the
	 * host - is responsible for deactivation. If the guest never runs
	 * again the line would stay active forever, so clear the active
	 * state here as well so that a later unmask can deliver it afresh.
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}
0214
/* Allow the distributor to forward this interrupt again. */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}
0219
static void gic_eoi_irq(struct irq_data *d)
{
	u32 hwirq = gic_irq(d);

	/*
	 * SGIs must be completed with the exact value read from GICC_IAR
	 * (which also encodes the source CPU); gic_handle_irq() stashed it
	 * in the per-CPU sgi_intid.
	 */
	if (hwirq < 16)
		hwirq = this_cpu_read(sgi_intid);

	writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_EOI);
}
0229
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	u32 hwirq = gic_irq(d);

	/* Do not deactivate an IRQ forwarded to a vcpu - the guest will. */
	if (irqd_is_forwarded_to_vcpu(d))
		return;

	/* As in gic_eoi_irq(), SGIs need the stashed GICC_IAR value. */
	if (hwirq < 16)
		hwirq = this_cpu_read(sgi_intid);

	writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}
0243
0244 static int gic_irq_set_irqchip_state(struct irq_data *d,
0245 enum irqchip_irq_state which, bool val)
0246 {
0247 u32 reg;
0248
0249 switch (which) {
0250 case IRQCHIP_STATE_PENDING:
0251 reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
0252 break;
0253
0254 case IRQCHIP_STATE_ACTIVE:
0255 reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
0256 break;
0257
0258 case IRQCHIP_STATE_MASKED:
0259 reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
0260 break;
0261
0262 default:
0263 return -EINVAL;
0264 }
0265
0266 gic_poke_irq(d, reg);
0267 return 0;
0268 }
0269
0270 static int gic_irq_get_irqchip_state(struct irq_data *d,
0271 enum irqchip_irq_state which, bool *val)
0272 {
0273 switch (which) {
0274 case IRQCHIP_STATE_PENDING:
0275 *val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
0276 break;
0277
0278 case IRQCHIP_STATE_ACTIVE:
0279 *val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
0280 break;
0281
0282 case IRQCHIP_STATE_MASKED:
0283 *val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
0284 break;
0285
0286 default:
0287 return -EINVAL;
0288 }
0289
0290 return 0;
0291 }
0292
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	int ret;

	/* SGIs are always edge-triggered; nothing to configure. */
	if (gicirq < 16)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs only support rising-edge or high-level triggers. */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL);
	if (ret && gicirq < 32) {
		/* A misconfigured PPI is not fatal; warn and carry on. */
		pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16);
		ret = 0;
	}

	return ret;
}
0317
0318 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
0319 {
0320
0321 if (cascading_gic_irq(d) || gic_irq(d) < 16)
0322 return -EINVAL;
0323
0324 if (vcpu)
0325 irqd_set_forwarded_to_vcpu(d);
0326 else
0327 irqd_clr_forwarded_to_vcpu(d);
0328 return 0;
0329 }
0330
/* Retrigger by re-asserting the pending state; nonzero means success. */
static int gic_retrigger(struct irq_data *data)
{
	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}
0335
/* Root interrupt handler: ack and dispatch until GICC_IAR runs dry. */
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		/* 1020-1023 are special/spurious INTIDs: nothing left to do. */
		if (unlikely(irqnr >= 1020))
			break;

		/* In EOImode==1, drop the running priority right away. */
		if (static_branch_likely(&supports_deactivate_key))
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
		isb();

		/*
		 * For SGIs, ensure any data written by the sender is
		 * observed only after the ACK read; pairs with the
		 * dmb(ishst) in gic_ipi_send_mask().
		 */
		if (irqnr <= 15) {
			smp_rmb();

			/*
			 * GICC_IAR encodes the source CPU for SGIs and the
			 * completion write must echo the exact value back.
			 * Stash it for gic_eoi_irq()/gic_eoimode1_eoi_irq();
			 * this works because SGIs don't nest on a CPU.
			 */
			this_cpu_write(sgi_intid, irqstat);
		}

		generic_handle_domain_irq(gic->domain, irqnr);
	} while (1);
}
0375
/*
 * Flow handler for a secondary GIC whose output is wired into an input
 * of another interrupt controller: ack one interrupt on the secondary
 * GIC and dispatch it into that GIC's domain.
 */
static void gic_handle_cascade_irq(struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int gic_irq;
	unsigned long status;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	/* Make sure the ack is complete before handling the interrupt. */
	isb();
	ret = generic_handle_domain_irq(chip_data->domain, gic_irq);
	if (unlikely(ret))
		handle_bad_irq(desc);
 out:
	chained_irq_exit(chip, desc);
}
0399
0400 static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p)
0401 {
0402 struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
0403
0404 if (gic->domain->dev)
0405 seq_printf(p, gic->domain->dev->of_node->name);
0406 else
0407 seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0]));
0408 }
0409
/*
 * Wire GIC instance @gic_nr as a cascaded controller behind parent
 * interrupt @irq. The handler data installed here is also what
 * cascading_gic_irq() tests for.
 */
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
					 &gic_data[gic_nr]);
}
0416
/*
 * Ask the GIC which CPU interface we are: the banked GICD_ITARGETSR0-7
 * registers read back the current CPU's interface bit in every byte.
 * Fold the bytes together and return the first non-zero mask found.
 */
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	/* A zero mask is only survivable on a uniprocessor system. */
	if (!mask && num_possible_cpus() > 1)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}
0435
0436 static bool gic_check_gicv2(void __iomem *base)
0437 {
0438 u32 val = readl_relaxed(base + GIC_CPU_IDENT);
0439 return (val & 0xff0fff) == 0x02043B;
0440 }
0441
/* Enable this CPU's interface, selecting EOImode for the primary GIC. */
static void gic_cpu_if_up(struct gic_chip_data *gic)
{
	void __iomem *cpu_base = gic_data_cpu_base(gic);
	u32 bypass = 0;
	u32 mode = 0;
	int i;

	/* Split EOI/Deactivate is only used on the root GIC. */
	if (gic == &gic_data[0] && static_branch_likely(&supports_deactivate_key))
		mode = GIC_CPU_CTRL_EOImodeNS;

	/* On a genuine GICv2, clear all active priorities first. */
	if (gic_check_gicv2(cpu_base))
		for (i = 0; i < 4; i++)
			writel_relaxed(0, cpu_base + GIC_CPU_ACTIVEPRIO + i * 4);

	/*
	 * Preserve the bypass disable bits so they are written back
	 * unchanged along with the enable bit.
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}
0464
0465
/* One-time distributor setup: route all SPIs to this CPU and enable. */
static void gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global (SPI) interrupts to be this CPU only, replicating
	 * the interface mask into all four bytes of each GICD_ITARGETSR.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}
0488
/*
 * Per-CPU bring-up: learn this CPU's interface mask (root GIC only),
 * configure the banked SGI/PPI registers and enable the CPU interface.
 * Called on the boot CPU from gic_init_bases() and on secondaries via
 * the gic_starting_cpu() hotplug callback.
 */
static int gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Setting up the CPU map is only relevant for the primary GIC
	 * because any interrupt routing through a secondary GIC goes
	 * through its single output line.
	 */
	if (gic == &gic_data[0]) {
		/*
		 * Get what the GIC says our CPU interface mask is; bail if
		 * this CPU is beyond what GICv2 can address.
		 */
		if (WARN_ON(cpu >= NR_GIC_CPU_IF))
			return -EINVAL;

		gic_check_cpu_features();
		cpu_mask = gic_get_cpumask(gic);
		gic_cpu_map[cpu] = cpu_mask;

		/*
		 * Clear our mask from the other entries in case they were
		 * still holding the reset-time 0xff placeholder.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			if (i != cpu)
				gic_cpu_map[i] &= ~cpu_mask;
	}

	gic_cpu_config(dist_base, 32, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);

	return 0;
}
0528
/*
 * Disable the CPU interface of GIC instance @gic_nr on the calling CPU
 * (clears only the enable bit, preserving the bypass configuration).
 * Returns 0 on success, -EINVAL for an out-of-range instance.
 */
int gic_cpu_if_down(unsigned int gic_nr)
{
	void __iomem *cpu_base;
	u32 val = 0;

	if (gic_nr >= CONFIG_ARM_GIC_MAX_NR)
		return -EINVAL;

	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);

	return 0;
}
0544
0545 #if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
0546
0547
0548
0549
0550
0551
/*
 * Snapshot the SPI configuration, target, enable and active state of the
 * distributor into the gic_chip_data save area, so that the GIC can be
 * powered off (e.g. cluster power-down) and restored afterwards by
 * gic_dist_restore().
 */
void gic_dist_save(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	/* 16 interrupts per GICD_ICFGR register. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic->saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	/* 4 interrupts per GICD_ITARGETSR register. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic->saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	/* 32 interrupts per enable/active register. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_active[i] =
			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}
0583
0584
0585
0586
0587
0588
0589
0590
/*
 * Restore the distributor state captured by gic_dist_save(). The
 * distributor is disabled while reprogramming and re-enabled at the end.
 * Priorities are not saved; they are reset to the default value.
 */
void gic_dist_restore(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic->saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	/* Priorities were not saved: reprogram the defaults. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic->saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	/* Clear-all first, then set exactly the saved enable bits. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	/* Same clear-then-set dance for the active state. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_active[i],
			dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}
0636
/*
 * Save this CPU's banked SGI/PPI state (enable, active, config - the
 * first 32 interrupts) into the per-CPU save areas before a CPU
 * power-down. Counterpart of gic_cpu_restore().
 */
void gic_cpu_save(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

}
0666
/*
 * Restore this CPU's banked SGI/PPI state saved by gic_cpu_save(),
 * reprogram default priorities and bring the CPU interface back up.
 */
void gic_cpu_restore(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	/* Clear all enables first, then set exactly the saved bits. */
	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	/* Priorities were not saved: reset to defaults. */
	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
					dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);
}
0708
0709 static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
0710 {
0711 int i;
0712
0713 for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
0714 switch (cmd) {
0715 case CPU_PM_ENTER:
0716 gic_cpu_save(&gic_data[i]);
0717 break;
0718 case CPU_PM_ENTER_FAILED:
0719 case CPU_PM_EXIT:
0720 gic_cpu_restore(&gic_data[i]);
0721 break;
0722 case CPU_CLUSTER_PM_ENTER:
0723 gic_dist_save(&gic_data[i]);
0724 break;
0725 case CPU_CLUSTER_PM_ENTER_FAILED:
0726 case CPU_CLUSTER_PM_EXIT:
0727 gic_dist_restore(&gic_data[i]);
0728 break;
0729 }
0730 }
0731
0732 return NOTIFY_OK;
0733 }
0734
/* Registered for the root GIC only, in gic_pm_init(). */
static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};
0738
/*
 * Allocate the per-CPU save areas for banked SGI/PPI state and, for the
 * root GIC, hook into the CPU PM notifier chain. Returns 0 or -ENOMEM
 * (with any partially-allocated areas freed via the goto ladder).
 */
static int gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_enable))
		return -ENOMEM;

	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_active))
		goto free_ppi_enable;

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_conf))
		goto free_ppi_active;

	/* Only the root GIC needs to react to CPU PM transitions. */
	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);

	return 0;

free_ppi_active:
	free_percpu(gic->saved_ppi_active);
free_ppi_enable:
	free_percpu(gic->saved_ppi_enable);

	return -ENOMEM;
}
0768 #else
0769 static int gic_pm_init(struct gic_chip_data *gic)
0770 {
0771 return 0;
0772 }
0773 #endif
0774
0775 #ifdef CONFIG_SMP
/*
 * Emulate a byte write with a locked 32-bit read-modify-write, for
 * distributors that do not implement byte accesses to GICD_ITARGETSR.
 * The lock serialises concurrent updates to bytes sharing a word.
 */
static void rmw_writeb(u8 bval, void __iomem *addr)
{
	static DEFINE_RAW_SPINLOCK(rmw_lock);
	unsigned long offset = (unsigned long)addr & 3UL;
	unsigned long shift = offset * 8;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&rmw_lock, flags);

	/* Align down to the containing word, then splice the byte in. */
	addr -= offset;
	val = readl_relaxed(addr);
	val &= ~GENMASK(shift + 7, shift);
	val |= bval << shift;
	writel_relaxed(val, addr);

	raw_spin_unlock_irqrestore(&rmw_lock, flags);
}
0794
/*
 * Route an SPI to a single CPU by writing its interface mask into the
 * per-IRQ byte of GICD_ITARGETSR. Only the root GIC supports affinity.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
	struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
	unsigned int cpu;

	if (unlikely(gic != &gic_data[0]))
		return -EINVAL;

	/* Unless forced, pick an online CPU from the requested mask. */
	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	/* Some distributors need byte writes emulated (see rmw_writeb()). */
	if (static_branch_unlikely(&needs_rmw_access))
		rmw_writeb(gic_cpu_map[cpu], reg);
	else
		writeb_relaxed(gic_cpu_map[cpu], reg);
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
0821
/* Raise SGI d->hwirq on every CPU in @mask via GICD_SGIR. */
static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;
	unsigned long flags, map = 0;

	if (unlikely(nr_cpu_ids == 1)) {
		/* Only one CPU: issue a self-IPI (target filter 0b10). */
		writel_relaxed(2 << 24 | d->hwirq,
			       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
		return;
	}

	gic_lock_irqsave(flags);

	/* Convert the logical CPU mask into a GIC interface mask. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the other
	 * CPUs before they observe us issuing the IPI. Pairs with the
	 * smp_rmb() in gic_handle_irq().
	 */
	dmb(ishst);

	/* SGIs are always sent via the root GIC. */
	writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	gic_unlock_irqrestore(flags);
}
0851
/* CPU hotplug callback: initialise the root GIC on a starting CPU. */
static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init(&gic_data[0]);
	return 0;
}
0857
/*
 * SMP bring-up: register the per-CPU init hotplug callback, allocate
 * virtual IRQs for the 8 SGIs used as IPIs and hand them to the core
 * IPI layer.
 */
static __init void gic_smp_init(void)
{
	/* A 1-cell fwspec is translated as an SGI (see gic_irq_domain_translate()). */
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= gic_data[0].domain->fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gic:starting",
				  gic_starting_cpu, NULL);

	base_sgi = __irq_domain_alloc_irqs(gic_data[0].domain, -1, 8,
					   NUMA_NO_NODE, &sgi_fwspec,
					   false, NULL);
	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}
0878 #else
0879 #define gic_smp_init() do { } while(0)
0880 #define gic_set_affinity NULL
0881 #define gic_ipi_send_mask NULL
0882 #endif
0883
/*
 * irq_chip for EOImode==0: a single GICC_EOIR write drops priority and
 * deactivates. No .name - gic_irq_print_chip() supplies the label.
 */
static const struct irq_chip gic_chip = {
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger          = gic_retrigger,
	.irq_set_affinity	= gic_set_affinity,
	.ipi_send_mask		= gic_ipi_send_mask,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_print_chip		= gic_irq_print_chip,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
0899
/*
 * irq_chip for EOImode==1 (split priority-drop/deactivate), used on the
 * root GIC when supports_deactivate_key is set. Adds vcpu forwarding.
 */
static const struct irq_chip gic_chip_mode1 = {
	.name			= "GICv2",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger          = gic_retrigger,
	.irq_set_affinity	= gic_set_affinity,
	.ipi_send_mask		= gic_ipi_send_mask,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
0916
0917 #ifdef CONFIG_BL_SWITCHER
0918
0919
0920
0921
0922
0923
/*
 * Raise SGI @irq on the CPU whose GIC interface number is @cpu_id
 * (b.L switcher helper: targets a physical interface, not a logical CPU).
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* This always happens on GIC0. */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
0931
0932
0933
0934
0935
0936
0937
0938
0939
0940
0941 int gic_get_cpu_id(unsigned int cpu)
0942 {
0943 unsigned int cpu_bit;
0944
0945 if (cpu >= NR_GIC_CPU_IF)
0946 return -1;
0947 cpu_bit = gic_cpu_map[cpu];
0948 if (cpu_bit & (cpu_bit - 1))
0949 return -1;
0950 return __ffs(cpu_bit);
0951 }
0952
0953
0954
0955
0956
0957
0958
0959
0960
0961
0962
/*
 * b.L switcher: migrate all SPIs currently targeting this CPU's old GIC
 * interface to interface @new_cpu_id, then re-issue any SGIs that were
 * pending for the old interface so they are delivered to the new one.
 * Runs on the CPU being switched, with the cpu_map lock held for the
 * target rewrite.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	/* Replicate the old interface bit into each byte of a word. */
	cur_target_mask = 0x01010101 << cur_cpu_id;
	/* Rotation that moves a cur_cpu_id bit onto new_cpu_id, per byte. */
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	gic_lock();

	/* Update the logical-CPU -> interface mapping first. */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Walk the SPI target registers (skipping the 32 banked SGIs/PPIs,
	 * i.e. the first 8 registers) and rotate every byte that targets
	 * the old interface onto the new one.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
		}
	}

	gic_unlock();

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (old interface). Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register we can only read/clear our own; re-send
	 * each pending SGI to the new interface via GICD_SGIR.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
						dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}
1027
1028
1029
1030
1031
1032
1033
1034 static unsigned long gic_dist_physaddr;
1035
1036 unsigned long gic_get_sgir_physaddr(void)
1037 {
1038 if (!gic_dist_physaddr)
1039 return 0;
1040 return gic_dist_physaddr + GIC_DIST_SOFTINT;
1041 }
1042
/* Record the distributor's physical address from the DT reg property. */
static void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;
	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}
1051
1052 #else
1053 #define gic_init_physaddr(node) do { } while (0)
1054 #endif
1055
/*
 * Domain .map callback: pick the right irq_chip (EOImode==1 chip only
 * for the root GIC) and flow handler - per-CPU for SGIs/PPIs (hwirq
 * 0-31), fasteoi for SPIs.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	struct gic_chip_data *gic = d->host_data;
	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
	const struct irq_chip *chip;

	chip = (static_branch_likely(&supports_deactivate_key) &&
		gic == &gic_data[0]) ? &gic_chip_mode1 : &gic_chip;

	switch (hw) {
	case 0 ... 31:
		/* Banked SGIs/PPIs: per-CPU devid flow. */
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		break;
	default:
		/* SPIs: fasteoi flow, single-target routing. */
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irqd);
		break;
	}

	/* Handlers must run in hard-irq context. */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}
1084
/* Nothing to undo on unmap; required by the legacy domain ops. */
static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
}
1088
/*
 * Translate a firmware irq specifier into (hwirq, type):
 *  - 1 cell with value < 16: an SGI allocated by gic_smp_init();
 *  - DT 3-cell form: cell0 selects SPI (0, +32) or PPI (1, +16),
 *    cell1 is the index, cell2 the trigger flags;
 *  - ACPI/fwnode 2-cell form: raw GSI plus trigger, SGIs rejected.
 */
static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
		*hwirq = fwspec->param[0];
		*type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}

	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

		/* A DT that specifies no trigger at all is broken. */
		WARN(*type == IRQ_TYPE_NONE,
		     "HW irq %ld has invalid type\n", *hwirq);
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if(fwspec->param_count != 2)
			return -EINVAL;

		if (fwspec->param[0] < 16) {
			pr_err(FW_BUG "Illegal GSI%d translation request\n",
			       fwspec->param[0]);
			return -EINVAL;
		}

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];

		WARN(*type == IRQ_TYPE_NONE,
		     "HW irq %ld has invalid type\n", *hwirq);
		return 0;
	}

	return -EINVAL;
}
1143
/*
 * Hierarchy-domain .alloc: translate the fwspec once, then map
 * nr_irqs consecutive hwirqs starting at the translated one.
 */
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}
1164
/* Ops used when the GIC is created from a fwnode (DT/ACPI). */
static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};
1170
/* Ops for the legacy (non-DT) path used by gic_init(). */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
};
1175
/*
 * Common init for one GIC instance: set up the register mappings
 * (per-CPU ones on non-banked systems), size the interrupt space,
 * create the irq domain (hierarchy when a fwnode is given, legacy
 * otherwise) and initialise distributor, CPU interface and PM state.
 * Returns 0 or a negative errno; per-CPU mappings are freed on error.
 */
static int gic_init_bases(struct gic_chip_data *gic,
			  struct fwnode_handle *handle)
{
	int gic_irqs, ret;

	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		/* Frankein-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			ret = -ENOMEM;
			goto error;
		}

		/* Each CPU's view is the raw base plus a per-core offset. */
		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = gic->percpu_offset * core_id;
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
				gic->raw_dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
				gic->raw_cpu_base + offset;
		}

		enable_frankengic();
	} else {
		/* Normal, sane GIC... */
		WARN(gic->percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     gic->percpu_offset);
		gic->dist_base.common_base = gic->raw_dist_base;
		gic->cpu_base.common_base = gic->raw_cpu_base;
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	if (handle) {		/* DT/ACPI */
		gic->domain = irq_domain_create_linear(handle, gic_irqs,
						       &gic_irq_domain_hierarchy_ops,
						       gic);
	} else {		/* Legacy support */
		/*
		 * For primary GICs, skip over SGIs.
		 * No secondary GIC support whatsoever.
		 */
		int irq_base;

		gic_irqs -= 16; /* calculate # of irqs to allocate */

		irq_base = irq_alloc_descs(16, 16, gic_irqs,
					   numa_node_id());
		if (irq_base < 0) {
			WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n");
			irq_base = 16;
		}

		gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
						    16, &gic_irq_domain_ops, gic);
	}

	if (WARN_ON(!gic->domain)) {
		ret = -ENODEV;
		goto error;
	}

	gic_dist_init(gic);
	ret = gic_cpu_init(gic);
	if (ret)
		goto error;

	ret = gic_pm_init(gic);
	if (ret)
		goto error;

	return 0;

error:
	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		free_percpu(gic->dist_base.percpu_base);
		free_percpu(gic->cpu_base.percpu_base);
	}

	return ret;
}
1271
/*
 * Boot-time wrapper around gic_init_bases(): for the root GIC it also
 * seeds the CPU interface map, installs the root interrupt handler and
 * brings up SMP/IPI support. Refuses to initialise twice.
 */
static int __init __gic_init_bases(struct gic_chip_data *gic,
				   struct fwnode_handle *handle)
{
	int i, ret;

	if (WARN_ON(!gic || gic->domain))
		return -EINVAL;

	if (gic == &gic_data[0]) {
		/*
		 * Initialize the CPU interface map to all CPUs.
		 * It will be refined as each CPU probes its ID
		 * (in gic_cpu_init()). This is only necessary for
		 * the primary GIC.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			gic_cpu_map[i] = 0xff;

		set_handle_irq(gic_handle_irq);
		if (static_branch_likely(&supports_deactivate_key))
			pr_info("GIC: Using split EOI/Deactivate mode\n");
	}

	ret = gic_init_bases(gic, handle);
	if (gic == &gic_data[0])
		gic_smp_init();

	return ret;
}
1300
/*
 * Legacy (non-DT, non-ACPI) entry point: initialise the root GIC from
 * pre-mapped distributor and CPU interface bases.
 */
void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base)
{
	struct gic_chip_data *gic;

	/*
	 * Non-DT/ACPI systems won't run a hypervisor, so let's not
	 * bother with these... (forces EOImode==0)
	 */
	static_branch_disable(&supports_deactivate_key);

	gic = &gic_data[0];
	gic->raw_dist_base = dist_base;
	gic->raw_cpu_base = cpu_base;

	__gic_init_bases(gic, NULL);
}
1317
1318 static void gic_teardown(struct gic_chip_data *gic)
1319 {
1320 if (WARN_ON(!gic))
1321 return;
1322
1323 if (gic->raw_dist_base)
1324 iounmap(gic->raw_dist_base);
1325 if (gic->raw_cpu_base)
1326 iounmap(gic->raw_cpu_base);
1327 }
1328
1329 #ifdef CONFIG_OF
/* Number of DT-probed GIC instances so far; index into gic_data[]. */
static int gic_cnt __initdata;
/* Set via "irqchip.gicv2_force_probe" to trust undersized CPU interface regions. */
static bool gicv2_force_probe;

static int __init gicv2_force_probe_cfg(char *buf)
{
	/* Parse the early parameter as a boolean into gicv2_force_probe. */
	return strtobool(buf, &gicv2_force_probe);
}
early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);
1338
1339 static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
1340 {
1341 struct resource cpuif_res;
1342
1343 of_address_to_resource(node, 1, &cpuif_res);
1344
1345 if (!is_hyp_mode_available())
1346 return false;
1347 if (resource_size(&cpuif_res) < SZ_8K) {
1348 void __iomem *alt;
1349
1350
1351
1352
1353 if (!gic_check_gicv2(*base))
1354 return false;
1355
1356 if (!gicv2_force_probe) {
1357 pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
1358 return false;
1359 }
1360
1361 alt = ioremap(cpuif_res.start, SZ_8K);
1362 if (!alt)
1363 return false;
1364 if (!gic_check_gicv2(alt + SZ_4K)) {
1365
1366
1367
1368
1369
1370 pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
1371 &cpuif_res.start);
1372 iounmap(*base);
1373 *base = alt;
1374 return true;
1375 }
1376
1377
1378
1379
1380
1381
1382
1383 iounmap(alt);
1384 alt = ioremap(cpuif_res.start, SZ_128K);
1385 if (!alt)
1386 return false;
1387 pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
1388 &cpuif_res.start);
1389 cpuif_res.end = cpuif_res.start + SZ_128K -1;
1390 iounmap(*base);
1391 *base = alt;
1392 }
1393 if (resource_size(&cpuif_res) == SZ_128K) {
1394
1395
1396
1397
1398
1399 if (!gic_check_gicv2(*base) ||
1400 !gic_check_gicv2(*base + 0xf000))
1401 return false;
1402
1403
1404
1405
1406
1407
1408 *base += 0xf000;
1409 cpuif_res.start += 0xf000;
1410 pr_warn("GIC: Adjusting CPU interface base to %pa\n",
1411 &cpuif_res.start);
1412 }
1413
1414 return true;
1415 }
1416
1417 static bool gic_enable_rmw_access(void *data)
1418 {
1419
1420
1421
1422
1423
1424 if (of_machine_is_compatible("renesas,emev2")) {
1425 static_branch_enable(&needs_rmw_access);
1426 return true;
1427 }
1428
1429 return false;
1430 }
1431
/* DT-matched quirks, applied via gic_enable_of_quirks() during setup. */
static const struct gic_quirk gic_quirks[] = {
	{
		/* PL390: sub-word distributor writes need RMW emulation. */
		.desc = "broken byte access",
		.compatible = "arm,pl390",
		.init = gic_enable_rmw_access,
	},
	{ },
};
1440
1441 static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
1442 {
1443 if (!gic || !node)
1444 return -EINVAL;
1445
1446 gic->raw_dist_base = of_iomap(node, 0);
1447 if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
1448 goto error;
1449
1450 gic->raw_cpu_base = of_iomap(node, 1);
1451 if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
1452 goto error;
1453
1454 if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
1455 gic->percpu_offset = 0;
1456
1457 gic_enable_of_quirks(node, gic_quirks, gic);
1458
1459 return 0;
1460
1461 error:
1462 gic_teardown(gic);
1463
1464 return -ENOMEM;
1465 }
1466
1467 int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
1468 {
1469 int ret;
1470
1471 if (!dev || !dev->of_node || !gic || !irq)
1472 return -EINVAL;
1473
1474 *gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
1475 if (!*gic)
1476 return -ENOMEM;
1477
1478 ret = gic_of_setup(*gic, dev->of_node);
1479 if (ret)
1480 return ret;
1481
1482 ret = gic_init_bases(*gic, &dev->of_node->fwnode);
1483 if (ret) {
1484 gic_teardown(*gic);
1485 return ret;
1486 }
1487
1488 irq_domain_set_pm_device((*gic)->domain, dev);
1489 irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);
1490
1491 return 0;
1492 }
1493
1494 static void __init gic_of_setup_kvm_info(struct device_node *node)
1495 {
1496 int ret;
1497 struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
1498 struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;
1499
1500 gic_v2_kvm_info.type = GIC_V2;
1501
1502 gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
1503 if (!gic_v2_kvm_info.maint_irq)
1504 return;
1505
1506 ret = of_address_to_resource(node, 2, vctrl_res);
1507 if (ret)
1508 return;
1509
1510 ret = of_address_to_resource(node, 3, vcpu_res);
1511 if (ret)
1512 return;
1513
1514 if (static_branch_likely(&supports_deactivate_key))
1515 vgic_set_kvm_info(&gic_v2_kvm_info);
1516 }
1517
/*
 * DT probe entry point for all GICv2-compatible controllers: maps the
 * register frames, initialises the (possibly cascaded) instance and
 * registers the optional v2m MSI frame.
 */
int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	struct gic_chip_data *gic;
	int irq, ret;

	if (WARN_ON(!node))
		return -ENODEV;

	if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
		return -EINVAL;

	gic = &gic_data[gic_cnt];

	ret = gic_of_setup(gic, node);
	if (ret)
		return ret;

	/*
	 * Disable split EOI/Deactivate if either HYP is not available
	 * or the CPU interface is too small. Only relevant for the
	 * first (root) GIC.
	 */
	if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
		static_branch_disable(&supports_deactivate_key);

	ret = __gic_init_bases(gic, &node->fwnode);
	if (ret) {
		gic_teardown(gic);
		return ret;
	}

	/* Physical-address and KVM info only apply to the root GIC. */
	if (!gic_cnt) {
		gic_init_physaddr(node);
		gic_of_setup_kvm_info(node);
	}

	/* Secondary GICs cascade into their parent via a chained IRQ. */
	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}
/* DT compatible strings handled by this driver. */
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
1574 #else
/* !CONFIG_OF stub: child GICs can only be described via devicetree. */
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
	return -ENOTSUPP;
}
1579 #endif
1580
1581 #ifdef CONFIG_ACPI
/* Data gathered from the MADT GICC entries during the ACPI probe. */
static struct
{
	phys_addr_t cpu_phys_base;	/* GICC base, common to all CPUs */
	u32 maint_irq;			/* VGIC maintenance interrupt (GSI) */
	int maint_irq_mode;		/* ACPI_EDGE_/ACPI_LEVEL_SENSITIVE */
	phys_addr_t vctrl_base;		/* GICH (hyp control) frame base */
	phys_addr_t vcpu_base;		/* GICV (virtual CPU if) frame base */
} acpi_data __initdata;
1590
/*
 * MADT GICC walker: records the CPU interface base and the
 * virtualisation-related addresses/interrupt into acpi_data.
 */
static int __init
gic_acpi_parse_madt_cpu(union acpi_subtable_headers *header,
			const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
	phys_addr_t gic_cpu_base;
	static int cpu_base_assigned;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 registers in the
	 * ACPI spec: every GICC entry must report the same CPU
	 * interface base address.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != acpi_data.cpu_phys_base)
		return -EINVAL;

	acpi_data.cpu_phys_base = gic_cpu_base;
	acpi_data.maint_irq = processor->vgic_interrupt;
	acpi_data.maint_irq_mode = (processor->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
				    ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
	acpi_data.vctrl_base = processor->gich_base_address;
	acpi_data.vcpu_base = processor->gicv_base_address;

	cpu_base_assigned = 1;
	return 0;
}
1622
1623
/* No-op MADT callback: used only to count matching subtable entries. */
static int __init acpi_dummy_func(union acpi_subtable_headers *header,
				  const unsigned long end)
{
	return 0;
}
1629
1630 static bool __init acpi_gic_redist_is_present(void)
1631 {
1632 return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
1633 acpi_dummy_func, 0) > 0;
1634 }
1635
1636 static bool __init gic_validate_dist(struct acpi_subtable_header *header,
1637 struct acpi_probe_entry *ape)
1638 {
1639 struct acpi_madt_generic_distributor *dist;
1640 dist = (struct acpi_madt_generic_distributor *)header;
1641
1642 return (dist->version == ape->driver_data &&
1643 (dist->version != ACPI_MADT_GIC_VERSION_NONE ||
1644 !acpi_gic_redist_is_present()));
1645 }
1646
/* Fixed sizes of the GICv2 register frames mapped from ACPI. */
#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K)
#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K)
#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)
1651
1652 static void __init gic_acpi_setup_kvm_info(void)
1653 {
1654 int irq;
1655 struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
1656 struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;
1657
1658 gic_v2_kvm_info.type = GIC_V2;
1659
1660 if (!acpi_data.vctrl_base)
1661 return;
1662
1663 vctrl_res->flags = IORESOURCE_MEM;
1664 vctrl_res->start = acpi_data.vctrl_base;
1665 vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1;
1666
1667 if (!acpi_data.vcpu_base)
1668 return;
1669
1670 vcpu_res->flags = IORESOURCE_MEM;
1671 vcpu_res->start = acpi_data.vcpu_base;
1672 vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
1673
1674 irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
1675 acpi_data.maint_irq_mode,
1676 ACPI_ACTIVE_HIGH);
1677 if (irq <= 0)
1678 return;
1679
1680 gic_v2_kvm_info.maint_irq = irq;
1681
1682 vgic_set_kvm_info(&gic_v2_kvm_info);
1683 }
1684
/* fwnode of the domain that translates ACPI GSIs (the root GIC domain). */
static struct fwnode_handle *gsi_domain_handle;

static struct fwnode_handle *gic_v2_get_gsi_domain_id(u32 gsi)
{
	/* A GICv2 system has a single domain: every GSI maps to it. */
	return gsi_domain_handle;
}
1691
/*
 * ACPI probe entry point: maps the GICC/GICD frames described by the
 * MADT, initialises the root GIC and wires up the ACPI GSI layer.
 */
static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct gic_chip_data *gic = &gic_data[0];
	int count, ret;

	/* Collect CPU base addresses */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_madt_cpu, 0);
	if (count <= 0) {
		pr_err("No valid GICC entries exist\n");
		return -EINVAL;
	}

	gic->raw_cpu_base = ioremap(acpi_data.cpu_phys_base, ACPI_GIC_CPU_IF_MEM_SIZE);
	if (!gic->raw_cpu_base) {
		pr_err("Unable to map GICC registers\n");
		return -ENOMEM;
	}

	dist = (struct acpi_madt_generic_distributor *)header;
	gic->raw_dist_base = ioremap(dist->base_address,
				     ACPI_GICV2_DIST_MEM_SIZE);
	if (!gic->raw_dist_base) {
		pr_err("Unable to map GICD registers\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	/*
	 * Disable split EOI/Deactivate if HYP is not available. The
	 * CPU interface was mapped with the full 8kB size above, so
	 * no further size check is needed here.
	 */
	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	/*
	 * Initialize GIC instance with basic support only.
	 */
	gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!gsi_domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	ret = __gic_init_bases(gic, gsi_domain_handle);
	if (ret) {
		pr_err("Failed to initialise GIC\n");
		irq_domain_free_fwnode(gsi_domain_handle);
		gic_teardown(gic);
		return ret;
	}

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v2_get_gsi_domain_id);

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(NULL, gic_data[0].domain);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;
}
/*
 * Match MADT distributor entries that declare GICv2 explicitly, or an
 * unspecified version when no redistributors are present (see
 * gic_validate_dist()).
 */
IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_V2,
		     gic_v2_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE,
		     gic_v2_acpi_init);
1764 #endif